diff --git a/CRD_VERSIONING.md b/CRD_VERSIONING.md new file mode 100644 index 000000000..2d681f3f5 --- /dev/null +++ b/CRD_VERSIONING.md @@ -0,0 +1,5 @@ +# CRD Versioning + +See Kubebuilder's [Tutorial: Multi-Version API](https://book.kubebuilder.io/multiversion-tutorial/tutorial) for a description of the mechanism. For more detail read the Kubernetes document [Versions in CustomResourceDefinitions](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/). + +This repository uses [crd-bumper](https://github.com/NearNodeFlash/nnf-deploy/tree/master/tools/crd-bumper#readme) to handle CRD versioning. diff --git a/Dockerfile b/Dockerfile index 5499eb6a9..18d3a5a20 100644 --- a/Dockerfile +++ b/Dockerfile @@ -49,7 +49,6 @@ WORKDIR /workspace ARG FAILFAST COPY hack/ hack/ -COPY test-tools.sh . COPY Makefile . RUN echo "building test target after copy" && pwd && ls -al diff --git a/Makefile b/Makefile index 4d1db905c..ab63a2ce0 100644 --- a/Makefile +++ b/Makefile @@ -222,17 +222,20 @@ TESTDIRS ?= internal api github/cluster-api FAILFAST ?= no test: manifests generate fmt vet envtest ## Run tests. find internal -name "*.db" -type d -exec rm -rf {} + - source test-tools.sh; prefix_webhook_names config/webhook ${ENVTEST_ASSETS_DIR}/webhook + ./hack/prefix-webhook-names.sh config/webhook ${ENVTEST_ASSETS_DIR}/webhook-nnf nnf + ./hack/prefix-webhook-names.sh vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook ${ENVTEST_ASSETS_DIR}/webhook-lus lus + ./hack/prefix-webhook-names.sh vendor/github.com/DataWorkflowServices/dws/config/webhook ${ENVTEST_ASSETS_DIR}/webhook-dws dws if [[ "${FAILFAST}" == yes ]]; then \ failfast="-ginkgo.fail-fast"; \ fi; \ set -o errexit; \ export GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT=${EVENTUALLY_TIMEOUT}; \ export GOMEGA_DEFAULT_EVENTUALLY_INTERVAL=${EVENTUALLY_INTERVAL}; \ - export WEBHOOK_DIR=${ENVTEST_ASSETS_DIR}/webhook; \ + export WEBHOOK_DIRS=${ENVTEST_ASSETS_DIR}/webhook-nnf:${ENVTEST_ASSETS_DIR}/webhook-lus:${ENVTEST_ASSETS_DIR}/webhook-dws; \ for subdir in ${TESTDIRS}; do \ KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path --bin-dir $(LOCALBIN))" go test -v ./$$subdir/... 
-coverprofile cover-$$(basename $$subdir.out) -ginkgo.v $$failfast; \ - done + done; \ + rm -rf internal/controller/nnf.db ##@ Build RPM_PLATFORM ?= linux/amd64 diff --git a/PROJECT b/PROJECT index 51a431c0b..c22445e3b 100644 --- a/PROJECT +++ b/PROJECT @@ -62,7 +62,7 @@ resources: namespaced: true domain: cray.hpe.com group: nnf - kind: NnfJobStorageInstance + kind: NnfDataMovement path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - api: @@ -70,15 +70,16 @@ resources: namespaced: true domain: cray.hpe.com group: nnf - kind: NnfDataMovement + kind: NnfDataMovementManager path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - api: crdVersion: v1 namespaced: true + controller: true domain: cray.hpe.com group: nnf - kind: NnfDataMovementManager + kind: NnfAccess path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - api: @@ -87,15 +88,16 @@ resources: controller: true domain: cray.hpe.com group: nnf - kind: NnfAccess + kind: NnfStorageProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - api: crdVersion: v1 namespaced: true + controller: true domain: cray.hpe.com group: nnf - kind: NnfPersistentStorageInstance + kind: NnfNodeECData path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - api: @@ -103,7 +105,7 @@ resources: namespaced: true domain: cray.hpe.com group: nnf - kind: NnfDataMovementWorkflow + kind: NnfContainerProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - api: @@ -112,19 +114,16 @@ resources: controller: true domain: cray.hpe.com group: nnf - kind: NnfStorageProfile + kind: NnfPortManager path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - webhooks: - validation: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true controller: true domain: cray.hpe.com group: nnf - kind: NnfNodeECData + kind: NnfLustreMGT path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - api: @@ -132,48 +131,170 @@ resources: namespaced: true domain: cray.hpe.com group: nnf - kind: NnfContainerProfile + kind: NnfDataMovementProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - webhooks: - validation: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true controller: true domain: cray.hpe.com group: nnf - kind: NnfPortManager + kind: NnfSystemStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 - api: crdVersion: v1 namespaced: true - controller: true domain: cray.hpe.com group: nnf - kind: NnfLustreMGT - path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 - version: v1alpha1 + kind: NnfAccess + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfContainerProfile + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfDataMovement + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfDataMovementManager + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true domain: cray.hpe.com group: nnf 
kind: NnfDataMovementProfile - path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 - version: v1alpha1 + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfLustreMGT + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfNode + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfNodeBlockStorage + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfNodeECData + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfNodeStorage + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfPortManager + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfStorage + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfStorageProfile + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 webhooks: validation: true webhookVersion: v1 - api: crdVersion: v1 namespaced: true - controller: true domain: cray.hpe.com group: nnf kind: NnfSystemStorage - path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 - version: v1alpha1 + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha2 + version: v1alpha2 + webhooks: + conversion: true + webhookVersion: v1 version: "3" diff --git a/api/v1alpha1/conversion.go b/api/v1alpha1/conversion.go new file mode 100644 index 000000000..022f7104c --- /dev/null +++ b/api/v1alpha1/conversion.go @@ -0,0 +1,615 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha1 + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + apiconversion "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/conversion" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" + utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" +) + +var convertlog = logf.Log.V(2).WithName("convert-v1alpha1") + +func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfAccess To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfAccess) + + if err := Convert_v1alpha1_NnfAccess_To_v1alpha2_NnfAccess(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfAccess{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfAccess) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfAccess) + convertlog.Info("Convert NnfAccess From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfAccess_To_v1alpha1_NnfAccess(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfContainerProfile) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfContainerProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfContainerProfile) + + if err := Convert_v1alpha1_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfContainerProfile{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfContainerProfile) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfContainerProfile) + convertlog.Info("Convert NnfContainerProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfDataMovement) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfDataMovement To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfDataMovement) + + if err := Convert_v1alpha1_NnfDataMovement_To_v1alpha2_NnfDataMovement(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfDataMovement{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. 
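+ // A hedged sketch of what such a restore looks like (NewHubOnlyField is
+ // hypothetical, not a field of this API): past the guard above the
+ // annotation is known to be present, so a hub-only field would be
+ // copied back with:
+ //   dst.Spec.NewHubOnlyField = restored.Spec.NewHubOnlyField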
+ // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfDataMovement) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfDataMovement) + convertlog.Info("Convert NnfDataMovement From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfDataMovement_To_v1alpha1_NnfDataMovement(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfDataMovementManager) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfDataMovementManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfDataMovementManager) + + if err := Convert_v1alpha1_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfDataMovementManager{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfDataMovementManager) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfDataMovementManager) + convertlog.Info("Convert NnfDataMovementManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfDataMovementProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfDataMovementProfile) + + if err := Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfDataMovementProfile{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfDataMovementProfile) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfDataMovementProfile) + convertlog.Info("Convert NnfDataMovementProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfLustreMGT) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfLustreMGT To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfLustreMGT) + + if err := Convert_v1alpha1_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(src, dst, nil); err != nil { + return err + } + + // Manually restore data. 
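+ // UnmarshalData looks for the conversion-data annotation and, when it is
+ // present, unmarshals it into 'restored'; it reports ok=false with a nil
+ // error when the annotation is absent (a resource that was never
+ // down-converted), so the guard below returns nil and the conversion
+ // proceeds with only the mechanically converted fields.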
+ restored := &nnfv1alpha2.NnfLustreMGT{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfLustreMGT) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfLustreMGT) + convertlog.Info("Convert NnfLustreMGT From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfNode) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfNode To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfNode) + + if err := Convert_v1alpha1_NnfNode_To_v1alpha2_NnfNode(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfNode{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfNode) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfNode) + convertlog.Info("Convert NnfNode From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfNode_To_v1alpha1_NnfNode(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfNodeBlockStorage) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfNodeBlockStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfNodeBlockStorage) + + if err := Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfNodeBlockStorage{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfNodeBlockStorage) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfNodeBlockStorage) + convertlog.Info("Convert NnfNodeBlockStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfNodeECData) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfNodeECData To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfNodeECData) + + if err := Convert_v1alpha1_NnfNodeECData_To_v1alpha2_NnfNodeECData(src, dst, nil); err != nil { + return err + } + + // Manually restore data. 
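+ // For kinds like this one that have no hub-only fields, the restore
+ // block below is currently a no-op; the scaffolding (presumably from
+ // crd-bumper, which this repository uses for CRD versioning) leaves the
+ // hook in place for the day a v1alpha2-only field appears.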
+ restored := &nnfv1alpha2.NnfNodeECData{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfNodeECData) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfNodeECData) + convertlog.Info("Convert NnfNodeECData From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfNodeECData_To_v1alpha1_NnfNodeECData(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfNodeStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfNodeStorage) + + if err := Convert_v1alpha1_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfNodeStorage{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfNodeStorage) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfNodeStorage) + convertlog.Info("Convert NnfNodeStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfPortManager) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfPortManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfPortManager) + + if err := Convert_v1alpha1_NnfPortManager_To_v1alpha2_NnfPortManager(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfPortManager{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfPortManager) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfPortManager) + convertlog.Info("Convert NnfPortManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfPortManager_To_v1alpha1_NnfPortManager(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. 
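+ // MarshalData stores the hub object (minus its metadata) as JSON in an
+ // annotation on 'dst'; that annotation is what a later ConvertTo reads
+ // back via UnmarshalData to recover hub-only fields that v1alpha1
+ // cannot represent.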
+ return utilconversion.MarshalData(src, dst) +} + +func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfStorage) + + if err := Convert_v1alpha1_NnfStorage_To_v1alpha2_NnfStorage(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfStorage{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfStorage) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfStorage) + convertlog.Info("Convert NnfStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfStorage_To_v1alpha1_NnfStorage(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfStorageProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfStorageProfile) + + if err := Convert_v1alpha1_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfStorageProfile{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfStorageProfile) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfStorageProfile) + convertlog.Info("Convert NnfStorageProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfSystemStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha2.NnfSystemStorage) + + if err := Convert_v1alpha1_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha2.NnfSystemStorage{} + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { + return err + } + + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. 
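+ // ExcludeDisabledRabbits exists only in the hub version (see the manual
+ // Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha1 acknowledgment at
+ // the bottom of this file), so it is the one field that must be
+ // restored here; without an annotation it falls back to its zero value.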
+ if hasAnno { + dst.Spec.ExcludeDisabledRabbits = restored.Spec.ExcludeDisabledRabbits + } else { + dst.Spec.ExcludeDisabledRabbits = false + } + + return nil +} + +func (dst *NnfSystemStorage) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha2.NnfSystemStorage) + convertlog.Info("Convert NnfSystemStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha2_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +// The List-based ConvertTo/ConvertFrom routines are never used by the +// conversion webhook, but the conversion-verifier tool wants to see them. +// The conversion-gen tool generated the Convert_X_to_Y routines, should they +// ever be needed. + +func resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: "nnf", Resource: resource} +} + +func (src *NnfAccessList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfAccessList"), "ConvertTo") +} + +func (dst *NnfAccessList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfAccessList"), "ConvertFrom") +} + +func (src *NnfContainerProfileList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfContainerProfileList"), "ConvertTo") +} + +func (dst *NnfContainerProfileList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfContainerProfileList"), "ConvertFrom") +} + +func (src *NnfDataMovementList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementList"), "ConvertTo") +} + +func (dst *NnfDataMovementList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementList"), "ConvertFrom") +} + +func (src *NnfDataMovementManagerList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementManagerList"), "ConvertTo") +} + +func (dst *NnfDataMovementManagerList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementManagerList"), "ConvertFrom") +} + +func (src *NnfDataMovementProfileList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementProfileList"), "ConvertTo") +} + +func (dst *NnfDataMovementProfileList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementProfileList"), "ConvertFrom") +} + +func (src *NnfLustreMGTList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfLustreMGTList"), "ConvertTo") +} + +func (dst *NnfLustreMGTList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfLustreMGTList"), "ConvertFrom") +} + +func (src *NnfNodeList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeList"), "ConvertTo") +} + +func (dst *NnfNodeList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeList"), "ConvertFrom") +} + +func (src *NnfNodeBlockStorageList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeBlockStorageList"), "ConvertTo") +} + +func (dst 
*NnfNodeBlockStorageList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeBlockStorageList"), "ConvertFrom") +} + +func (src *NnfNodeECDataList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeECDataList"), "ConvertTo") +} + +func (dst *NnfNodeECDataList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeECDataList"), "ConvertFrom") +} + +func (src *NnfNodeStorageList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeStorageList"), "ConvertTo") +} + +func (dst *NnfNodeStorageList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeStorageList"), "ConvertFrom") +} + +func (src *NnfPortManagerList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfPortManagerList"), "ConvertTo") +} + +func (dst *NnfPortManagerList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfPortManagerList"), "ConvertFrom") +} + +func (src *NnfStorageList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfStorageList"), "ConvertTo") +} + +func (dst *NnfStorageList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfStorageList"), "ConvertFrom") +} + +func (src *NnfStorageProfileList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfStorageProfileList"), "ConvertTo") +} + +func (dst *NnfStorageProfileList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfStorageProfileList"), "ConvertFrom") +} + +func (src *NnfSystemStorageList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfSystemStorageList"), "ConvertTo") +} + +func (dst *NnfSystemStorageList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfSystemStorageList"), "ConvertFrom") +} + +// The conversion-gen tool dropped these from zz_generated.conversion.go to +// force us to acknowledge that we are addressing the conversion requirements. +func Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in *nnfv1alpha2.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha2_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in, out, s) +} diff --git a/api/v1alpha1/conversion_test.go b/api/v1alpha1/conversion_test.go new file mode 100644 index 000000000..a076a1a86 --- /dev/null +++ b/api/v1alpha1/conversion_test.go @@ -0,0 +1,107 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha1 + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" + utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" +) + +func TestFuzzyConversion(t *testing.T) { + + t.Run("for NnfAccess", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfAccess{}, + Spoke: &NnfAccess{}, + })) + + t.Run("for NnfContainerProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfContainerProfile{}, + Spoke: &NnfContainerProfile{}, + })) + + t.Run("for NnfDataMovement", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfDataMovement{}, + Spoke: &NnfDataMovement{}, + })) + + t.Run("for NnfDataMovementManager", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfDataMovementManager{}, + Spoke: &NnfDataMovementManager{}, + })) + + t.Run("for NnfDataMovementProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfDataMovementProfile{}, + Spoke: &NnfDataMovementProfile{}, + })) + + t.Run("for NnfLustreMGT", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfLustreMGT{}, + Spoke: &NnfLustreMGT{}, + })) + + t.Run("for NnfNode", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfNode{}, + Spoke: &NnfNode{}, + })) + + t.Run("for NnfNodeBlockStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfNodeBlockStorage{}, + Spoke: &NnfNodeBlockStorage{}, + })) + + t.Run("for NnfNodeECData", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfNodeECData{}, + Spoke: &NnfNodeECData{}, + })) + + t.Run("for NnfNodeStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfNodeStorage{}, + Spoke: &NnfNodeStorage{}, + })) + + t.Run("for NnfPortManager", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfPortManager{}, + Spoke: &NnfPortManager{}, + })) + + t.Run("for NnfStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfStorage{}, + Spoke: &NnfStorage{}, + })) + + t.Run("for NnfStorageProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfStorageProfile{}, + Spoke: &NnfStorageProfile{}, + })) + + t.Run("for NnfSystemStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha2.NnfSystemStorage{}, + Spoke: &NnfSystemStorage{}, + })) + +} + +// Just touch ginkgo, so it's here to interpret any ginkgo args from +// "make test", so that doesn't fail on this test file. +var _ = BeforeSuite(func() {}) diff --git a/api/v1alpha1/doc.go b/api/v1alpha1/doc.go new file mode 100644 index 000000000..f75bddf2e --- /dev/null +++ b/api/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// The following tag tells conversion-gen to generate conversion routines, and +// it tells conversion-gen the name of the hub version. +// +k8s:conversion-gen=github.com/NearNodeFlash/nnf-sos/api/v1alpha2 +package v1alpha1 diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go index 3541ac9c4..6ab8487d2 100644 --- a/api/v1alpha1/groupversion_info.go +++ b/api/v1alpha1/groupversion_info.go @@ -1,5 +1,5 @@ /* - * Copyright 2021, 2022 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -36,4 +36,7 @@ var ( // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme + + // Used by zz_generated.conversion.go. + localSchemeBuilder = SchemeBuilder.SchemeBuilder ) diff --git a/api/v1alpha1/nnf_resource_condition_types.go b/api/v1alpha1/nnf_resource_condition_types.go index e6789f453..ad0da3903 100644 --- a/api/v1alpha1/nnf_resource_condition_types.go +++ b/api/v1alpha1/nnf_resource_condition_types.go @@ -1,3 +1,22 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package v1alpha1 import ( diff --git a/api/v1alpha1/nnf_access_types.go b/api/v1alpha1/nnfaccess_types.go similarity index 100% rename from api/v1alpha1/nnf_access_types.go rename to api/v1alpha1/nnfaccess_types.go diff --git a/api/v1alpha1/nnf_datamovement_types.go b/api/v1alpha1/nnfdatamovement_types.go similarity index 100% rename from api/v1alpha1/nnf_datamovement_types.go rename to api/v1alpha1/nnfdatamovement_types.go diff --git a/api/v1alpha1/nnf_datamovementmanager_types.go b/api/v1alpha1/nnfdatamovementmanager_types.go similarity index 100% rename from api/v1alpha1/nnf_datamovementmanager_types.go rename to api/v1alpha1/nnfdatamovementmanager_types.go diff --git a/api/v1alpha1/nnf_lustre_mgt.go b/api/v1alpha1/nnflustremgt_types.go similarity index 100% rename from api/v1alpha1/nnf_lustre_mgt.go rename to api/v1alpha1/nnflustremgt_types.go diff --git a/api/v1alpha1/nnf_node_types.go b/api/v1alpha1/nnfnode_types.go similarity index 100% rename from api/v1alpha1/nnf_node_types.go rename to api/v1alpha1/nnfnode_types.go diff --git a/api/v1alpha1/nnf_node_block_storage_types.go b/api/v1alpha1/nnfnodeblockstorage_types.go similarity index 100% rename from api/v1alpha1/nnf_node_block_storage_types.go rename to api/v1alpha1/nnfnodeblockstorage_types.go diff --git a/api/v1alpha1/nnf_node_ec_data_types.go b/api/v1alpha1/nnfnodeecdata_types.go similarity index 100% rename from api/v1alpha1/nnf_node_ec_data_types.go rename to api/v1alpha1/nnfnodeecdata_types.go diff --git a/api/v1alpha1/nnf_node_storage_types.go b/api/v1alpha1/nnfnodestorage_types.go similarity index 98% rename from api/v1alpha1/nnf_node_storage_types.go rename to api/v1alpha1/nnfnodestorage_types.go index 952622ee2..72770c951 100644 --- a/api/v1alpha1/nnf_node_storage_types.go +++ b/api/v1alpha1/nnfnodestorage_types.go @@ -54,7 +54,7 @@ type NnfNodeStorageSpec struct { // block device. // +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre // +kubebuilder:default:=raw - FileSystemType string `json:"fileSystemType"` + FileSystemType string `json:"fileSystemType,omitempty"` // LustreStorageSpec describes the Lustre target created here, if // FileSystemType specifies a Lustre target. diff --git a/api/v1alpha1/nnf_port_manager_types.go b/api/v1alpha1/nnfportmanager_types.go similarity index 100% rename from api/v1alpha1/nnf_port_manager_types.go rename to api/v1alpha1/nnfportmanager_types.go diff --git a/api/v1alpha1/nnf_storage_types.go b/api/v1alpha1/nnfstorage_types.go similarity index 99% rename from api/v1alpha1/nnf_storage_types.go rename to api/v1alpha1/nnfstorage_types.go index 63bdb9dff..4eca1eaed 100644 --- a/api/v1alpha1/nnf_storage_types.go +++ b/api/v1alpha1/nnfstorage_types.go @@ -88,7 +88,7 @@ type NnfStorageSpec struct { // block device. 
// +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre // +kubebuilder:default:=raw - FileSystemType string `json:"fileSystemType"` + FileSystemType string `json:"fileSystemType,omitempty"` // User ID for file system UserID uint32 `json:"userID"` diff --git a/api/v1alpha1/nnfsystemstorage_types.go b/api/v1alpha1/nnfsystemstorage_types.go index 6eb37afff..aec7a73fb 100644 --- a/api/v1alpha1/nnfsystemstorage_types.go +++ b/api/v1alpha1/nnfsystemstorage_types.go @@ -60,7 +60,7 @@ type NnfSystemStorageSpec struct { // ComputesTarget specifies which computes to make the storage accessible to // +kubebuilder:validation:Enum=all;even;odd;pattern // +kubebuilder:default:=all - ComputesTarget NnfSystemStorageComputesTarget `json:"computesTarget"` + ComputesTarget NnfSystemStorageComputesTarget `json:"computesTarget,omitempty"` // ComputesPattern is a list of compute node indexes (0-15) to make the storage accessible to. This // is only used if ComputesTarget is "pattern" @@ -76,7 +76,7 @@ type NnfSystemStorageSpec struct { // Type is the file system type to use for the storage allocation // +kubebuilder:validation:Enum=raw;xfs;gfs2 // +kubebuilder:default:=raw - Type string `json:"type"` + Type string `json:"type,omitempty"` // StorageProfile is an object reference to the storage profile to use StorageProfile corev1.ObjectReference `json:"storageProfile"` diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..2670f814e --- /dev/null +++ b/api/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,3213 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + apiv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + v1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" + v2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
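+// The init() above ties this function into localSchemeBuilder, so
+// AddToScheme installs these generated pairwise Convert_* functions into
+// a scheme alongside the v1alpha1 types themselves.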
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*LustreStorageSpec)(nil), (*v1alpha2.LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(a.(*LustreStorageSpec), b.(*v1alpha2.LustreStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(a.(*v1alpha2.LustreStorageSpec), b.(*LustreStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha2.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfAccess_To_v1alpha2_NnfAccess(a.(*NnfAccess), b.(*v1alpha2.NnfAccess), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfAccess)(nil), (*NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfAccess_To_v1alpha1_NnfAccess(a.(*v1alpha2.NnfAccess), b.(*NnfAccess), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfAccessList)(nil), (*v1alpha2.NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfAccessList_To_v1alpha2_NnfAccessList(a.(*NnfAccessList), b.(*v1alpha2.NnfAccessList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfAccessList)(nil), (*NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfAccessList_To_v1alpha1_NnfAccessList(a.(*v1alpha2.NnfAccessList), b.(*NnfAccessList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfAccessSpec)(nil), (*v1alpha2.NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(a.(*NnfAccessSpec), b.(*v1alpha2.NnfAccessSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(a.(*v1alpha2.NnfAccessSpec), b.(*NnfAccessSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha2.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha2.NnfAccessStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfAccessStatus)(nil), (*NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(a.(*v1alpha2.NnfAccessStatus), b.(*NnfAccessStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfContainerProfile)(nil), (*v1alpha2.NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(a.(*NnfContainerProfile), b.(*v1alpha2.NnfContainerProfile), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha2.NnfContainerProfile)(nil), (*NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(a.(*v1alpha2.NnfContainerProfile), b.(*NnfContainerProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileData)(nil), (*v1alpha2.NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(a.(*NnfContainerProfileData), b.(*v1alpha2.NnfContainerProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfContainerProfileData)(nil), (*NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(a.(*v1alpha2.NnfContainerProfileData), b.(*NnfContainerProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileList)(nil), (*v1alpha2.NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(a.(*NnfContainerProfileList), b.(*v1alpha2.NnfContainerProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfContainerProfileList)(nil), (*NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(a.(*v1alpha2.NnfContainerProfileList), b.(*NnfContainerProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileStorage)(nil), (*v1alpha2.NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(a.(*NnfContainerProfileStorage), b.(*v1alpha2.NnfContainerProfileStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfContainerProfileStorage)(nil), (*NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(a.(*v1alpha2.NnfContainerProfileStorage), b.(*NnfContainerProfileStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovement)(nil), (*v1alpha2.NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovement_To_v1alpha2_NnfDataMovement(a.(*NnfDataMovement), b.(*v1alpha2.NnfDataMovement), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovement)(nil), (*NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovement_To_v1alpha1_NnfDataMovement(a.(*v1alpha2.NnfDataMovement), b.(*NnfDataMovement), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementCommandStatus)(nil), (*v1alpha2.NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(a.(*NnfDataMovementCommandStatus), 
b.(*v1alpha2.NnfDataMovementCommandStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementCommandStatus)(nil), (*NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(a.(*v1alpha2.NnfDataMovementCommandStatus), b.(*NnfDataMovementCommandStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementConfig)(nil), (*v1alpha2.NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(a.(*NnfDataMovementConfig), b.(*v1alpha2.NnfDataMovementConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementConfig)(nil), (*NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(a.(*v1alpha2.NnfDataMovementConfig), b.(*NnfDataMovementConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementList)(nil), (*v1alpha2.NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(a.(*NnfDataMovementList), b.(*v1alpha2.NnfDataMovementList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementList)(nil), (*NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(a.(*v1alpha2.NnfDataMovementList), b.(*NnfDataMovementList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManager)(nil), (*v1alpha2.NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(a.(*NnfDataMovementManager), b.(*v1alpha2.NnfDataMovementManager), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementManager)(nil), (*NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(a.(*v1alpha2.NnfDataMovementManager), b.(*NnfDataMovementManager), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerList)(nil), (*v1alpha2.NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(a.(*NnfDataMovementManagerList), b.(*v1alpha2.NnfDataMovementManagerList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementManagerList)(nil), (*NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(a.(*v1alpha2.NnfDataMovementManagerList), b.(*NnfDataMovementManagerList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerSpec)(nil), (*v1alpha2.NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(a.(*NnfDataMovementManagerSpec), b.(*v1alpha2.NnfDataMovementManagerSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementManagerSpec)(nil), (*NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(a.(*v1alpha2.NnfDataMovementManagerSpec), b.(*NnfDataMovementManagerSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerStatus)(nil), (*v1alpha2.NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(a.(*NnfDataMovementManagerStatus), b.(*v1alpha2.NnfDataMovementManagerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementManagerStatus)(nil), (*NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(a.(*v1alpha2.NnfDataMovementManagerStatus), b.(*NnfDataMovementManagerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfile)(nil), (*v1alpha2.NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(a.(*NnfDataMovementProfile), b.(*v1alpha2.NnfDataMovementProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementProfile)(nil), (*NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(a.(*v1alpha2.NnfDataMovementProfile), b.(*NnfDataMovementProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileData)(nil), (*v1alpha2.NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(a.(*NnfDataMovementProfileData), b.(*v1alpha2.NnfDataMovementProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(a.(*v1alpha2.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha2.NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha2.NnfDataMovementProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementProfileList)(nil), (*NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
+		return Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(a.(*v1alpha2.NnfDataMovementProfileList), b.(*NnfDataMovementProfileList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpec)(nil), (*v1alpha2.NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(a.(*NnfDataMovementSpec), b.(*v1alpha2.NnfDataMovementSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementSpec)(nil), (*NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(a.(*v1alpha2.NnfDataMovementSpec), b.(*NnfDataMovementSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpecSourceDestination)(nil), (*v1alpha2.NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(a.(*NnfDataMovementSpecSourceDestination), b.(*v1alpha2.NnfDataMovementSpecSourceDestination), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementSpecSourceDestination)(nil), (*NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(a.(*v1alpha2.NnfDataMovementSpecSourceDestination), b.(*NnfDataMovementSpecSourceDestination), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfDataMovementStatus)(nil), (*v1alpha2.NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(a.(*NnfDataMovementStatus), b.(*v1alpha2.NnfDataMovementStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDataMovementStatus)(nil), (*NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(a.(*v1alpha2.NnfDataMovementStatus), b.(*NnfDataMovementStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfDriveStatus)(nil), (*v1alpha2.NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(a.(*NnfDriveStatus), b.(*v1alpha2.NnfDriveStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfDriveStatus)(nil), (*NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(a.(*v1alpha2.NnfDriveStatus), b.(*NnfDriveStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfLustreMGT)(nil), (*v1alpha2.NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(a.(*NnfLustreMGT), b.(*v1alpha2.NnfLustreMGT), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfLustreMGT)(nil), (*NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(a.(*v1alpha2.NnfLustreMGT), b.(*NnfLustreMGT), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfLustreMGTList)(nil), (*v1alpha2.NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(a.(*NnfLustreMGTList), b.(*v1alpha2.NnfLustreMGTList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfLustreMGTList)(nil), (*NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(a.(*v1alpha2.NnfLustreMGTList), b.(*NnfLustreMGTList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfLustreMGTSpec)(nil), (*v1alpha2.NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(a.(*NnfLustreMGTSpec), b.(*v1alpha2.NnfLustreMGTSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfLustreMGTSpec)(nil), (*NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(a.(*v1alpha2.NnfLustreMGTSpec), b.(*NnfLustreMGTSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatus)(nil), (*v1alpha2.NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(a.(*NnfLustreMGTStatus), b.(*v1alpha2.NnfLustreMGTStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfLustreMGTStatus)(nil), (*NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(a.(*v1alpha2.NnfLustreMGTStatus), b.(*NnfLustreMGTStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatusClaim)(nil), (*v1alpha2.NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(a.(*NnfLustreMGTStatusClaim), b.(*v1alpha2.NnfLustreMGTStatusClaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfLustreMGTStatusClaim)(nil), (*NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(a.(*v1alpha2.NnfLustreMGTStatusClaim), b.(*NnfLustreMGTStatusClaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNode)(nil), (*v1alpha2.NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNode_To_v1alpha2_NnfNode(a.(*NnfNode), b.(*v1alpha2.NnfNode), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNode)(nil), (*NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNode_To_v1alpha1_NnfNode(a.(*v1alpha2.NnfNode), b.(*NnfNode), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorage)(nil), (*v1alpha2.NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(a.(*NnfNodeBlockStorage), b.(*v1alpha2.NnfNodeBlockStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeBlockStorage)(nil), (*NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(a.(*v1alpha2.NnfNodeBlockStorage), b.(*NnfNodeBlockStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAccessStatus)(nil), (*v1alpha2.NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(a.(*NnfNodeBlockStorageAccessStatus), b.(*v1alpha2.NnfNodeBlockStorageAccessStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeBlockStorageAccessStatus)(nil), (*NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(a.(*v1alpha2.NnfNodeBlockStorageAccessStatus), b.(*NnfNodeBlockStorageAccessStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationSpec)(nil), (*v1alpha2.NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(a.(*NnfNodeBlockStorageAllocationSpec), b.(*v1alpha2.NnfNodeBlockStorageAllocationSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeBlockStorageAllocationSpec)(nil), (*NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(a.(*v1alpha2.NnfNodeBlockStorageAllocationSpec), b.(*NnfNodeBlockStorageAllocationSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationStatus)(nil), (*v1alpha2.NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(a.(*NnfNodeBlockStorageAllocationStatus), b.(*v1alpha2.NnfNodeBlockStorageAllocationStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeBlockStorageAllocationStatus)(nil), (*NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(a.(*v1alpha2.NnfNodeBlockStorageAllocationStatus), b.(*NnfNodeBlockStorageAllocationStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageDeviceStatus)(nil), (*v1alpha2.NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(a.(*NnfNodeBlockStorageDeviceStatus), b.(*v1alpha2.NnfNodeBlockStorageDeviceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeBlockStorageDeviceStatus)(nil), (*NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(a.(*v1alpha2.NnfNodeBlockStorageDeviceStatus), b.(*NnfNodeBlockStorageDeviceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageList)(nil), (*v1alpha2.NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(a.(*NnfNodeBlockStorageList), b.(*v1alpha2.NnfNodeBlockStorageList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeBlockStorageList)(nil), (*NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(a.(*v1alpha2.NnfNodeBlockStorageList), b.(*NnfNodeBlockStorageList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageSpec)(nil), (*v1alpha2.NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(a.(*NnfNodeBlockStorageSpec), b.(*v1alpha2.NnfNodeBlockStorageSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeBlockStorageSpec)(nil), (*NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(a.(*v1alpha2.NnfNodeBlockStorageSpec), b.(*NnfNodeBlockStorageSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageStatus)(nil), (*v1alpha2.NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(a.(*NnfNodeBlockStorageStatus), b.(*v1alpha2.NnfNodeBlockStorageStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeBlockStorageStatus)(nil), (*NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(a.(*v1alpha2.NnfNodeBlockStorageStatus), b.(*NnfNodeBlockStorageStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeECData)(nil), (*v1alpha2.NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeECData_To_v1alpha2_NnfNodeECData(a.(*NnfNodeECData), b.(*v1alpha2.NnfNodeECData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeECData)(nil), (*NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeECData_To_v1alpha1_NnfNodeECData(a.(*v1alpha2.NnfNodeECData), b.(*NnfNodeECData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeECDataList)(nil), (*v1alpha2.NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(a.(*NnfNodeECDataList), b.(*v1alpha2.NnfNodeECDataList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeECDataList)(nil), (*NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(a.(*v1alpha2.NnfNodeECDataList), b.(*NnfNodeECDataList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeECDataSpec)(nil), (*v1alpha2.NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(a.(*NnfNodeECDataSpec), b.(*v1alpha2.NnfNodeECDataSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeECDataSpec)(nil), (*NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(a.(*v1alpha2.NnfNodeECDataSpec), b.(*NnfNodeECDataSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeECDataStatus)(nil), (*v1alpha2.NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(a.(*NnfNodeECDataStatus), b.(*v1alpha2.NnfNodeECDataStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeECDataStatus)(nil), (*NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(a.(*v1alpha2.NnfNodeECDataStatus), b.(*NnfNodeECDataStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeList)(nil), (*v1alpha2.NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeList_To_v1alpha2_NnfNodeList(a.(*NnfNodeList), b.(*v1alpha2.NnfNodeList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeList)(nil), (*NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeList_To_v1alpha1_NnfNodeList(a.(*v1alpha2.NnfNodeList), b.(*NnfNodeList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeSpec)(nil), (*v1alpha2.NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(a.(*NnfNodeSpec), b.(*v1alpha2.NnfNodeSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeSpec)(nil), (*NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(a.(*v1alpha2.NnfNodeSpec), b.(*NnfNodeSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeStatus)(nil), (*v1alpha2.NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(a.(*NnfNodeStatus), b.(*v1alpha2.NnfNodeStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeStatus)(nil), (*NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(a.(*v1alpha2.NnfNodeStatus), b.(*NnfNodeStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeStorage)(nil), (*v1alpha2.NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(a.(*NnfNodeStorage), b.(*v1alpha2.NnfNodeStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeStorage)(nil), (*NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(a.(*v1alpha2.NnfNodeStorage), b.(*NnfNodeStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeStorageAllocationStatus)(nil), (*v1alpha2.NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(a.(*NnfNodeStorageAllocationStatus), b.(*v1alpha2.NnfNodeStorageAllocationStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeStorageAllocationStatus)(nil), (*NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(a.(*v1alpha2.NnfNodeStorageAllocationStatus), b.(*NnfNodeStorageAllocationStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeStorageList)(nil), (*v1alpha2.NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(a.(*NnfNodeStorageList), b.(*v1alpha2.NnfNodeStorageList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeStorageList)(nil), (*NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(a.(*v1alpha2.NnfNodeStorageList), b.(*NnfNodeStorageList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeStorageSpec)(nil), (*v1alpha2.NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(a.(*NnfNodeStorageSpec), b.(*v1alpha2.NnfNodeStorageSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeStorageSpec)(nil), (*NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(a.(*v1alpha2.NnfNodeStorageSpec), b.(*NnfNodeStorageSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfNodeStorageStatus)(nil), (*v1alpha2.NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(a.(*NnfNodeStorageStatus), b.(*v1alpha2.NnfNodeStorageStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfNodeStorageStatus)(nil), (*NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(a.(*v1alpha2.NnfNodeStorageStatus), b.(*NnfNodeStorageStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfPortManager)(nil), (*v1alpha2.NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfPortManager_To_v1alpha2_NnfPortManager(a.(*NnfPortManager), b.(*v1alpha2.NnfPortManager), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfPortManager)(nil), (*NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfPortManager_To_v1alpha1_NnfPortManager(a.(*v1alpha2.NnfPortManager), b.(*NnfPortManager), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationSpec)(nil), (*v1alpha2.NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(a.(*NnfPortManagerAllocationSpec), b.(*v1alpha2.NnfPortManagerAllocationSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfPortManagerAllocationSpec)(nil), (*NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(a.(*v1alpha2.NnfPortManagerAllocationSpec), b.(*NnfPortManagerAllocationSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationStatus)(nil), (*v1alpha2.NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(a.(*NnfPortManagerAllocationStatus), b.(*v1alpha2.NnfPortManagerAllocationStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfPortManagerAllocationStatus)(nil), (*NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(a.(*v1alpha2.NnfPortManagerAllocationStatus), b.(*NnfPortManagerAllocationStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfPortManagerList)(nil), (*v1alpha2.NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(a.(*NnfPortManagerList), b.(*v1alpha2.NnfPortManagerList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfPortManagerList)(nil), (*NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(a.(*v1alpha2.NnfPortManagerList), b.(*NnfPortManagerList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfPortManagerSpec)(nil), (*v1alpha2.NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(a.(*NnfPortManagerSpec), b.(*v1alpha2.NnfPortManagerSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfPortManagerSpec)(nil), (*NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(a.(*v1alpha2.NnfPortManagerSpec), b.(*NnfPortManagerSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfPortManagerStatus)(nil), (*v1alpha2.NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(a.(*NnfPortManagerStatus), b.(*v1alpha2.NnfPortManagerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfPortManagerStatus)(nil), (*NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(a.(*v1alpha2.NnfPortManagerStatus), b.(*NnfPortManagerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfResourceStatus)(nil), (*v1alpha2.NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(a.(*NnfResourceStatus), b.(*v1alpha2.NnfResourceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfResourceStatus)(nil), (*NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(a.(*v1alpha2.NnfResourceStatus), b.(*NnfResourceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfServerStatus)(nil), (*v1alpha2.NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfServerStatus_To_v1alpha2_NnfServerStatus(a.(*NnfServerStatus), b.(*v1alpha2.NnfServerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfServerStatus)(nil), (*NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfServerStatus_To_v1alpha1_NnfServerStatus(a.(*v1alpha2.NnfServerStatus), b.(*NnfServerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorage)(nil), (*v1alpha2.NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorage_To_v1alpha2_NnfStorage(a.(*NnfStorage), b.(*v1alpha2.NnfStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorage)(nil), (*NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorage_To_v1alpha1_NnfStorage(a.(*v1alpha2.NnfStorage), b.(*NnfStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationNodes)(nil), (*v1alpha2.NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(a.(*NnfStorageAllocationNodes), b.(*v1alpha2.NnfStorageAllocationNodes), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageAllocationNodes)(nil), (*NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(a.(*v1alpha2.NnfStorageAllocationNodes), b.(*NnfStorageAllocationNodes), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetSpec)(nil), (*v1alpha2.NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(a.(*NnfStorageAllocationSetSpec), b.(*v1alpha2.NnfStorageAllocationSetSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageAllocationSetSpec)(nil), (*NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(a.(*v1alpha2.NnfStorageAllocationSetSpec), b.(*NnfStorageAllocationSetSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetStatus)(nil), (*v1alpha2.NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(a.(*NnfStorageAllocationSetStatus), b.(*v1alpha2.NnfStorageAllocationSetStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageAllocationSetStatus)(nil), (*NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(a.(*v1alpha2.NnfStorageAllocationSetStatus), b.(*NnfStorageAllocationSetStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageList)(nil), (*v1alpha2.NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageList_To_v1alpha2_NnfStorageList(a.(*NnfStorageList), b.(*v1alpha2.NnfStorageList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageList)(nil), (*NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageList_To_v1alpha1_NnfStorageList(a.(*v1alpha2.NnfStorageList), b.(*NnfStorageList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageLustreSpec)(nil), (*v1alpha2.NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(a.(*NnfStorageLustreSpec), b.(*v1alpha2.NnfStorageLustreSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageLustreSpec)(nil), (*NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(a.(*v1alpha2.NnfStorageLustreSpec), b.(*NnfStorageLustreSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageLustreStatus)(nil), (*v1alpha2.NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(a.(*NnfStorageLustreStatus), b.(*v1alpha2.NnfStorageLustreStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(a.(*v1alpha2.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha2.NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha2.NnfStorageProfile), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfile)(nil), (*NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(a.(*v1alpha2.NnfStorageProfile), b.(*NnfStorageProfile), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha2.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha2.NnfStorageProfileCmdLines), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(a.(*v1alpha2.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileData)(nil), (*v1alpha2.NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(a.(*NnfStorageProfileData), b.(*v1alpha2.NnfStorageProfileData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileData)(nil), (*NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(a.(*v1alpha2.NnfStorageProfileData), b.(*NnfStorageProfileData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileGFS2Data)(nil), (*v1alpha2.NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(a.(*NnfStorageProfileGFS2Data), b.(*v1alpha2.NnfStorageProfileGFS2Data), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileGFS2Data)(nil), (*NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(a.(*v1alpha2.NnfStorageProfileGFS2Data), b.(*NnfStorageProfileGFS2Data), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMLvChangeCmdLines)(nil), (*v1alpha2.NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(a.(*NnfStorageProfileLVMLvChangeCmdLines), b.(*v1alpha2.NnfStorageProfileLVMLvChangeCmdLines), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileLVMLvChangeCmdLines)(nil), (*NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(a.(*v1alpha2.NnfStorageProfileLVMLvChangeCmdLines), b.(*NnfStorageProfileLVMLvChangeCmdLines), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMVgChangeCmdLines)(nil), (*v1alpha2.NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(a.(*NnfStorageProfileLVMVgChangeCmdLines), b.(*v1alpha2.NnfStorageProfileLVMVgChangeCmdLines), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileLVMVgChangeCmdLines)(nil), (*NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(a.(*v1alpha2.NnfStorageProfileLVMVgChangeCmdLines), b.(*NnfStorageProfileLVMVgChangeCmdLines), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileList)(nil), (*v1alpha2.NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(a.(*NnfStorageProfileList), b.(*v1alpha2.NnfStorageProfileList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileList)(nil), (*NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(a.(*v1alpha2.NnfStorageProfileList), b.(*NnfStorageProfileList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreCmdLines)(nil), (*v1alpha2.NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(a.(*NnfStorageProfileLustreCmdLines), b.(*v1alpha2.NnfStorageProfileLustreCmdLines), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(a.(*v1alpha2.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreData)(nil), (*v1alpha2.NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(a.(*NnfStorageProfileLustreData), b.(*v1alpha2.NnfStorageProfileLustreData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileLustreData)(nil), (*NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(a.(*v1alpha2.NnfStorageProfileLustreData), b.(*NnfStorageProfileLustreData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreMiscOptions)(nil), (*v1alpha2.NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(a.(*NnfStorageProfileLustreMiscOptions), b.(*v1alpha2.NnfStorageProfileLustreMiscOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileLustreMiscOptions)(nil), (*NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(a.(*v1alpha2.NnfStorageProfileLustreMiscOptions), b.(*NnfStorageProfileLustreMiscOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileRawData)(nil), (*v1alpha2.NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(a.(*NnfStorageProfileRawData), b.(*v1alpha2.NnfStorageProfileRawData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileRawData)(nil), (*NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(a.(*v1alpha2.NnfStorageProfileRawData), b.(*NnfStorageProfileRawData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageProfileXFSData)(nil), (*v1alpha2.NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(a.(*NnfStorageProfileXFSData), b.(*v1alpha2.NnfStorageProfileXFSData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageProfileXFSData)(nil), (*NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(a.(*v1alpha2.NnfStorageProfileXFSData), b.(*NnfStorageProfileXFSData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageSpec)(nil), (*v1alpha2.NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(a.(*NnfStorageSpec), b.(*v1alpha2.NnfStorageSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageSpec)(nil), (*NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(a.(*v1alpha2.NnfStorageSpec), b.(*NnfStorageSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfStorageStatus)(nil), (*v1alpha2.NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(a.(*NnfStorageStatus), b.(*v1alpha2.NnfStorageStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfStorageStatus)(nil), (*NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(a.(*v1alpha2.NnfStorageStatus), b.(*NnfStorageStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfSystemStorage)(nil), (*v1alpha2.NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(a.(*NnfSystemStorage), b.(*v1alpha2.NnfSystemStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfSystemStorage)(nil), (*NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(a.(*v1alpha2.NnfSystemStorage), b.(*NnfSystemStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfSystemStorageList)(nil), (*v1alpha2.NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(a.(*NnfSystemStorageList), b.(*v1alpha2.NnfSystemStorageList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfSystemStorageList)(nil), (*NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(a.(*v1alpha2.NnfSystemStorageList), b.(*NnfSystemStorageList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfSystemStorageSpec)(nil), (*v1alpha2.NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(a.(*NnfSystemStorageSpec), b.(*v1alpha2.NnfSystemStorageSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha2.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha2.NnfSystemStorageStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.NnfSystemStorageStatus)(nil), (*NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(a.(*v1alpha2.NnfSystemStorageStatus), b.(*NnfSystemStorageStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1alpha2.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(a.(*v1alpha2.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
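The registration function that closes above wires one generated conversion into the scheme per (source, destination) type pair, in both directions. The one exception is the v1alpha2-to-v1alpha1 NnfSystemStorageSpec conversion, registered through AddConversionFunc rather than AddGeneratedConversionFunc: that is the conversion-gen idiom for a hand-written override of a conversion the generator could not emit on its own. A minimal sketch of how these registrations are exercised; the RegisterConversions entry-point name follows the usual conversion-gen convention and is an assumption, since the function header falls outside the lines shown here:

	// Sketch only (within this API package): convert a v1alpha1 object to
	// v1alpha2 through the scheme rather than by calling a Convert_* helper
	// directly. Assumes conversion-gen's standard RegisterConversions entry point.
	func exampleSchemeConvert() error {
		s := runtime.NewScheme() // k8s.io/apimachinery/pkg/runtime
		if err := RegisterConversions(s); err != nil {
			return err
		}
		src := &NnfAccess{}          // v1alpha1 source
		dst := &v1alpha2.NnfAccess{} // v1alpha2 destination
		// Scheme.Convert dispatches to the function registered for this type pair.
		return s.Convert(src, dst, nil)
	}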
+
+func autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha2.LustreStorageSpec, s conversion.Scope) error {
+	out.FileSystemName = in.FileSystemName
+	out.TargetType = in.TargetType
+	out.StartIndex = in.StartIndex
+	out.MgsAddress = in.MgsAddress
+	out.BackFs = in.BackFs
+	return nil
+}
+
+// Convert_v1alpha1_LustreStorageSpec_To_v1alpha2_LustreStorageSpec is an autogenerated conversion function.
+func Convert_v1alpha1_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha2.LustreStorageSpec, s conversion.Scope) error {
+	return autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in, out, s)
+}
+
+func autoConvert_v1alpha2_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1alpha2.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error {
+	out.FileSystemName = in.FileSystemName
+	out.TargetType = in.TargetType
+	out.StartIndex = in.StartIndex
+	out.MgsAddress = in.MgsAddress
+	out.BackFs = in.BackFs
+	return nil
+}
+
+// Convert_v1alpha2_LustreStorageSpec_To_v1alpha1_LustreStorageSpec is an autogenerated conversion function.
+func Convert_v1alpha2_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1alpha2.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error {
+	return autoConvert_v1alpha2_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_NnfAccess_To_v1alpha2_NnfAccess(in *NnfAccess, out *v1alpha2.NnfAccess, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1alpha1_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_v1alpha1_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1alpha1_NnfAccess_To_v1alpha2_NnfAccess is an autogenerated conversion function.
+func Convert_v1alpha1_NnfAccess_To_v1alpha2_NnfAccess(in *NnfAccess, out *v1alpha2.NnfAccess, s conversion.Scope) error {
+	return autoConvert_v1alpha1_NnfAccess_To_v1alpha2_NnfAccess(in, out, s)
+}
+
+func autoConvert_v1alpha2_NnfAccess_To_v1alpha1_NnfAccess(in *v1alpha2.NnfAccess, out *NnfAccess, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1alpha2_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_v1alpha2_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1alpha2_NnfAccess_To_v1alpha1_NnfAccess is an autogenerated conversion function.
+func Convert_v1alpha2_NnfAccess_To_v1alpha1_NnfAccess(in *v1alpha2.NnfAccess, out *NnfAccess, s conversion.Scope) error {
+	return autoConvert_v1alpha2_NnfAccess_To_v1alpha1_NnfAccess(in, out, s)
+}
+
+func autoConvert_v1alpha1_NnfAccessList_To_v1alpha2_NnfAccessList(in *NnfAccessList, out *v1alpha2.NnfAccessList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]v1alpha2.NnfAccess)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_v1alpha1_NnfAccessList_To_v1alpha2_NnfAccessList is an autogenerated conversion function.
+func Convert_v1alpha1_NnfAccessList_To_v1alpha2_NnfAccessList(in *NnfAccessList, out *v1alpha2.NnfAccessList, s conversion.Scope) error {
+	return autoConvert_v1alpha1_NnfAccessList_To_v1alpha2_NnfAccessList(in, out, s)
+}
+
+func autoConvert_v1alpha2_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha2.NnfAccessList, out *NnfAccessList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]NnfAccess)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_v1alpha2_NnfAccessList_To_v1alpha1_NnfAccessList is an autogenerated conversion function.
+func Convert_v1alpha2_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha2.NnfAccessList, out *NnfAccessList, s conversion.Scope) error {
+	return autoConvert_v1alpha2_NnfAccessList_To_v1alpha1_NnfAccessList(in, out, s)
+}
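The list conversions above do not copy Items element by element: because the v1alpha1 and v1alpha2 element structs are laid out identically in memory, the whole slice header is reinterpreted through unsafe.Pointer. A small self-contained illustration of the same cast with hypothetical types; conversion-gen only emits this zero-copy form when the layouts are provably identical, and otherwise generates a per-element conversion loop:

	// Hypothetical twin types with identical memory layout.
	type widgetV1 struct {
		Name string
		Size int64
	}
	type widgetV2 struct {
		Name string
		Size int64
	}

	// castWidgets reinterprets the slice in place, the same trick as the
	// generated `out.Items = *(*[]v1alpha2.NnfAccess)(unsafe.Pointer(&in.Items))`.
	func castWidgets(in []widgetV1) []widgetV2 {
		return *(*[]widgetV2)(unsafe.Pointer(&in)) // import "unsafe"
	}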
+
+func autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha2.NnfAccessSpec, s conversion.Scope) error {
+	out.DesiredState = in.DesiredState
+	out.TeardownState = apiv1alpha2.WorkflowState(in.TeardownState)
+	out.Target = in.Target
+	out.UserID = in.UserID
+	out.GroupID = in.GroupID
+	out.ClientReference = in.ClientReference
+	out.MountPath = in.MountPath
+	out.MakeClientMounts = in.MakeClientMounts
+	out.MountPathPrefix = in.MountPathPrefix
+	out.StorageReference = in.StorageReference
+	return nil
+}
+
+// Convert_v1alpha1_NnfAccessSpec_To_v1alpha2_NnfAccessSpec is an autogenerated conversion function.
+func Convert_v1alpha1_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha2.NnfAccessSpec, s conversion.Scope) error {
+	return autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in, out, s)
+}
+
+func autoConvert_v1alpha2_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha2.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error {
+	out.DesiredState = in.DesiredState
+	out.TeardownState = apiv1alpha2.WorkflowState(in.TeardownState)
+	out.Target = in.Target
+	out.UserID = in.UserID
+	out.GroupID = in.GroupID
+	out.ClientReference = in.ClientReference
+	out.MountPath = in.MountPath
+	out.MakeClientMounts = in.MakeClientMounts
+	out.MountPathPrefix = in.MountPathPrefix
+	out.StorageReference = in.StorageReference
+	return nil
+}
+
+// Convert_v1alpha2_NnfAccessSpec_To_v1alpha1_NnfAccessSpec is an autogenerated conversion function.
+func Convert_v1alpha2_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha2.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error {
+	return autoConvert_v1alpha2_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha2.NnfAccessStatus, s conversion.Scope) error {
+	out.State = in.State
+	out.Ready = in.Ready
+	out.ResourceError = in.ResourceError
+	return nil
+}
+
+// Convert_v1alpha1_NnfAccessStatus_To_v1alpha2_NnfAccessStatus is an autogenerated conversion function.
+func Convert_v1alpha1_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha2.NnfAccessStatus, s conversion.Scope) error {
+	return autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in, out, s)
+}
+
+func autoConvert_v1alpha2_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha2.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error {
+	out.State = in.State
+	out.Ready = in.Ready
+	out.ResourceError = in.ResourceError
+	return nil
+}
+
+// Convert_v1alpha2_NnfAccessStatus_To_v1alpha1_NnfAccessStatus is an autogenerated conversion function.
+func Convert_v1alpha2_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha2.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error {
+	return autoConvert_v1alpha2_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in, out, s)
+}
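Each conversion is emitted as a private autoConvert_* body plus an exported Convert_* wrapper that simply delegates to it. That split is what makes hand-written overrides possible: when a field cannot be converted automatically, the exported function is written by hand (and registered via AddConversionFunc, as seen for NnfSystemStorageSpec earlier) while the generated autoConvert_* half still does the bulk of the copying. A hedged sketch with a hypothetical type and field, not code from this repository:

	// Hypothetical: v1alpha2.NnfExample grew a field with no v1alpha1 home, so
	// the exported wrapper is hand-written and conversion-gen emits only the
	// autoConvert_* half for this direction.
	func Convert_v1alpha2_NnfExample_To_v1alpha1_NnfExample(in *v1alpha2.NnfExample, out *NnfExample, s conversion.Scope) error {
		if err := autoConvert_v1alpha2_NnfExample_To_v1alpha1_NnfExample(in, out, s); err != nil {
			return err
		}
		// in.NewField has no v1alpha1 counterpart; a real override might stash
		// it in an annotation so a round-trip back to v1alpha2 can restore it.
		return nil
	}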
+
+func autoConvert_v1alpha1_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha2.NnfContainerProfile, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1alpha1_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1alpha1_NnfContainerProfile_To_v1alpha2_NnfContainerProfile is an autogenerated conversion function.
+func Convert_v1alpha1_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha2.NnfContainerProfile, s conversion.Scope) error {
+	return autoConvert_v1alpha1_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in, out, s)
+}
+
+func autoConvert_v1alpha2_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in *v1alpha2.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1alpha2_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1alpha2_NnfContainerProfile_To_v1alpha1_NnfContainerProfile is an autogenerated conversion function.
+func Convert_v1alpha2_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in *v1alpha2.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error {
+	return autoConvert_v1alpha2_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in, out, s)
+}
+
+func autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha2.NnfContainerProfileData, s conversion.Scope) error {
+	out.Pinned = in.Pinned
+	out.Storages = *(*[]v1alpha2.NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages))
+	out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds))
+	out.PostRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PostRunTimeoutSeconds))
+	out.RetryLimit = in.RetryLimit
+	out.UserID = (*uint32)(unsafe.Pointer(in.UserID))
+	out.GroupID = (*uint32)(unsafe.Pointer(in.GroupID))
+	out.NumPorts = in.NumPorts
+	out.Spec = (*v1.PodSpec)(unsafe.Pointer(in.Spec))
+	out.MPISpec = (*v2beta1.MPIJobSpec)(unsafe.Pointer(in.MPISpec))
+	return nil
+}
+
+// Convert_v1alpha1_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData is an autogenerated conversion function.
+func Convert_v1alpha1_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha2.NnfContainerProfileData, s conversion.Scope) error {
+	return autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in, out, s)
+}
+
+func autoConvert_v1alpha2_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in *v1alpha2.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error {
+	out.Pinned = in.Pinned
+	out.Storages = *(*[]NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages))
+	out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds))
+	out.PostRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PostRunTimeoutSeconds))
+	out.RetryLimit = in.RetryLimit
+	out.UserID = (*uint32)(unsafe.Pointer(in.UserID))
+	out.GroupID = (*uint32)(unsafe.Pointer(in.GroupID))
+	out.NumPorts = in.NumPorts
+	out.Spec = (*v1.PodSpec)(unsafe.Pointer(in.Spec))
+	out.MPISpec = (*v2beta1.MPIJobSpec)(unsafe.Pointer(in.MPISpec))
+	return nil
+}
+
+// Convert_v1alpha2_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData is an autogenerated conversion function.
+func Convert_v1alpha2_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in *v1alpha2.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error {
+	return autoConvert_v1alpha2_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in, out, s)
+}
+
+func autoConvert_v1alpha1_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha2.NnfContainerProfileList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]v1alpha2.NnfContainerProfile)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_v1alpha1_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList is an autogenerated conversion function.
+func Convert_v1alpha1_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha2.NnfContainerProfileList, s conversion.Scope) error {
+	return autoConvert_v1alpha1_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in, out, s)
+}
+
+func autoConvert_v1alpha2_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in *v1alpha2.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]NnfContainerProfile)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_v1alpha2_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList is an autogenerated conversion function.
+func Convert_v1alpha2_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in *v1alpha2.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error {
+	return autoConvert_v1alpha2_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in, out, s)
+}
+
+func autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha2.NnfContainerProfileStorage, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Optional = in.Optional
+	out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode)
+	return nil
+}
+
+// Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage is an autogenerated conversion function.
+func Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha2.NnfContainerProfileStorage, s conversion.Scope) error {
+	return autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in, out, s)
+}
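Downstream, these exported helpers are what the spoke version's webhook conversion methods delegate to. A sketch of that glue in the controller-runtime hub-and-spoke style; the method body, the nil conversion.Scope argument, and the package alias are assumptions rather than code shown in this diff:

	// Sketch: a v1alpha1 spoke converting itself to the v1alpha2 hub.
	// crconversion is sigs.k8s.io/controller-runtime/pkg/conversion.
	func (src *NnfContainerProfile) ConvertTo(dstRaw crconversion.Hub) error {
		dst := dstRaw.(*v1alpha2.NnfContainerProfile)
		return Convert_v1alpha1_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(src, dst, nil)
	}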
+func Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha2.NnfContainerProfileStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in, out, s) +} + +func autoConvert_v1alpha2_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in *v1alpha2.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { + out.Name = in.Name + out.Optional = in.Optional + out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) + return nil +} + +// Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage is an autogenerated conversion function. +func Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in *v1alpha2.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovement_To_v1alpha2_NnfDataMovement(in *NnfDataMovement, out *v1alpha2.NnfDataMovement, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfDataMovement_To_v1alpha2_NnfDataMovement is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovement_To_v1alpha2_NnfDataMovement(in *NnfDataMovement, out *v1alpha2.NnfDataMovement, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovement_To_v1alpha2_NnfDataMovement(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovement_To_v1alpha1_NnfDataMovement(in *v1alpha2.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfDataMovement_To_v1alpha1_NnfDataMovement is an autogenerated conversion function. +func Convert_v1alpha2_NnfDataMovement_To_v1alpha1_NnfDataMovement(in *v1alpha2.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovement_To_v1alpha1_NnfDataMovement(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha2.NnfDataMovementCommandStatus, s conversion.Scope) error { + out.Command = in.Command + out.ElapsedTime = in.ElapsedTime + out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) + out.LastMessage = in.LastMessage + out.LastMessageTime = in.LastMessageTime + out.Seconds = in.Seconds + out.Items = (*int32)(unsafe.Pointer(in.Items)) + out.Directories = (*int32)(unsafe.Pointer(in.Directories)) + out.Files = (*int32)(unsafe.Pointer(in.Files)) + out.Links = (*int32)(unsafe.Pointer(in.Links)) + out.Data = in.Data + out.Rate = in.Rate + return nil +} + +// Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha2.NnfDataMovementCommandStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in *v1alpha2.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { + out.Command = in.Command + out.ElapsedTime = in.ElapsedTime + out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) + out.LastMessage = in.LastMessage + out.LastMessageTime = in.LastMessageTime + out.Seconds = in.Seconds + out.Items = (*int32)(unsafe.Pointer(in.Items)) + out.Directories = (*int32)(unsafe.Pointer(in.Directories)) + out.Files = (*int32)(unsafe.Pointer(in.Files)) + out.Links = (*int32)(unsafe.Pointer(in.Links)) + out.Data = in.Data + out.Rate = in.Rate + return nil +} + +// Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in *v1alpha2.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha2.NnfDataMovementConfig, s conversion.Scope) error { + out.Dryrun = in.Dryrun + out.MpirunOptions = in.MpirunOptions + out.DcpOptions = in.DcpOptions + out.LogStdout = in.LogStdout + out.StoreStdout = in.StoreStdout + out.Slots = (*int)(unsafe.Pointer(in.Slots)) + out.MaxSlots = (*int)(unsafe.Pointer(in.MaxSlots)) + return nil +} + +// Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha2.NnfDataMovementConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in *v1alpha2.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { + out.Dryrun = in.Dryrun + out.MpirunOptions = in.MpirunOptions + out.DcpOptions = in.DcpOptions + out.LogStdout = in.LogStdout + out.StoreStdout = in.StoreStdout + out.Slots = (*int)(unsafe.Pointer(in.Slots)) + out.MaxSlots = (*int)(unsafe.Pointer(in.MaxSlots)) + return nil +} + +// Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in *v1alpha2.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha2.NnfDataMovementList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfDataMovement)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfDataMovementList_To_v1alpha2_NnfDataMovementList is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha2.NnfDataMovementList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in *v1alpha2.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfDataMovement)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfDataMovementList_To_v1alpha1_NnfDataMovementList is an autogenerated conversion function. +func Convert_v1alpha2_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in *v1alpha2.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha2.NnfDataMovementManager, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha2.NnfDataMovementManager, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in *v1alpha2.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in *v1alpha2.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha2.NnfDataMovementManagerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfDataMovementManager)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha2.NnfDataMovementManagerList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in *v1alpha2.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfDataMovementManager)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList is an autogenerated conversion function. +func Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in *v1alpha2.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha2.NnfDataMovementManagerSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Template = in.Template + out.UpdateStrategy = in.UpdateStrategy + out.HostPath = in.HostPath + out.MountPath = in.MountPath + return nil +} + +// Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha2.NnfDataMovementManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in *v1alpha2.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Template = in.Template + out.UpdateStrategy = in.UpdateStrategy + out.HostPath = in.HostPath + out.MountPath = in.MountPath + return nil +} + +// Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec is an autogenerated conversion function. 
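These generated `Convert_*` functions are only half of the conversion story: kinds marked with a conversion webhook (such as `NnfDataMovement`) also need `ConvertTo`/`ConvertFrom` methods so controller-runtime can route between versions. A minimal sketch of that wiring, assuming `v1alpha2` is the hub version; the repo's actual hand-written `conversion.go` may differ in details:

```go
package v1alpha1

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/conversion"

	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
)

// ConvertTo converts this spoke NnfDataMovement to the hub version.
func (src *NnfDataMovement) ConvertTo(dstRaw conversion.Hub) error {
	dst, ok := dstRaw.(*nnfv1alpha2.NnfDataMovement)
	if !ok {
		return apierrors.NewBadRequest("expected a v1alpha2.NnfDataMovement")
	}
	// A nil Scope is safe here: the generated converters never consult it.
	return Convert_v1alpha1_NnfDataMovement_To_v1alpha2_NnfDataMovement(src, dst, nil)
}

// ConvertFrom converts the hub version into this spoke version.
func (dst *NnfDataMovement) ConvertFrom(srcRaw conversion.Hub) error {
	src, ok := srcRaw.(*nnfv1alpha2.NnfDataMovement)
	if !ok {
		return apierrors.NewBadRequest("expected a v1alpha2.NnfDataMovement")
	}
	return Convert_v1alpha2_NnfDataMovement_To_v1alpha1_NnfDataMovement(src, dst, nil)
}
```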
+func Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in *v1alpha2.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha2.NnfDataMovementManagerStatus, s conversion.Scope) error { + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha2.NnfDataMovementManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in *v1alpha2.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in *v1alpha2.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha2.NnfDataMovementProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha2.NnfDataMovementProfile, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in *v1alpha2.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in *v1alpha2.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha2.NnfDataMovementProfileData, s conversion.Scope) error { + out.Default = in.Default + out.Pinned = in.Pinned + out.Slots = in.Slots + out.MaxSlots = in.MaxSlots + out.Command = in.Command + out.LogStdout = in.LogStdout + out.StoreStdout = in.StoreStdout + out.ProgressIntervalSeconds = in.ProgressIntervalSeconds + out.CreateDestDir = in.CreateDestDir + out.StatCommand = in.StatCommand + return nil +} + +// Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha2.NnfDataMovementProfileData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in *v1alpha2.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { + out.Default = in.Default + out.Pinned = in.Pinned + out.Slots = in.Slots + out.MaxSlots = in.MaxSlots + out.Command = in.Command + out.LogStdout = in.LogStdout + out.StoreStdout = in.StoreStdout + out.ProgressIntervalSeconds = in.ProgressIntervalSeconds + out.CreateDestDir = in.CreateDestDir + out.StatCommand = in.StatCommand + return nil +} + +// Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData is an autogenerated conversion function. +func Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in *v1alpha2.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha2.NnfDataMovementProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha2.NnfDataMovementProfileList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in *v1alpha2.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in *v1alpha2.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha2.NnfDataMovementSpec, s conversion.Scope) error { + out.Source = (*v1alpha2.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) + out.Destination = (*v1alpha2.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) + out.UserId = in.UserId + out.GroupId = in.GroupId + out.Cancel = in.Cancel + out.ProfileReference = in.ProfileReference + out.UserConfig = (*v1alpha2.NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) + return nil +} + +// Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha2.NnfDataMovementSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in *v1alpha2.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { + out.Source = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) + out.Destination = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) + out.UserId = in.UserId + out.GroupId = in.GroupId + out.Cancel = in.Cancel + out.ProfileReference = in.ProfileReference + out.UserConfig = (*NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) + return nil +} + +// Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in *v1alpha2.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha2.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + out.Path = in.Path + out.StorageReference = in.StorageReference + return nil +} + +// Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha2.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in *v1alpha2.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + out.Path = in.Path + out.StorageReference = in.StorageReference + return nil +} + +// Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in *v1alpha2.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in, out, s) +} + +func autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha2.NnfDataMovementStatus, s conversion.Scope) error { + out.State = in.State + out.Status = in.Status + out.Message = in.Message + out.StartTime = (*metav1.MicroTime)(unsafe.Pointer(in.StartTime)) + out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) + out.Restarts = in.Restarts + out.CommandStatus = (*v1alpha2.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha2.NnfDataMovementStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in *v1alpha2.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { + out.State = in.State + out.Status = in.Status + out.Message = in.Message + out.StartTime = (*metav1.MicroTime)(unsafe.Pointer(in.StartTime)) + out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) + out.Restarts = in.Restarts + out.CommandStatus = (*NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in *v1alpha2.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha2.NnfDriveStatus, s conversion.Scope) error { + out.Model = in.Model + out.SerialNumber = in.SerialNumber + out.FirmwareVersion = in.FirmwareVersion + out.Slot = in.Slot + out.Capacity = in.Capacity + out.WearLevel = in.WearLevel + if err := Convert_v1alpha1_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfDriveStatus_To_v1alpha2_NnfDriveStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha2.NnfDriveStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in *v1alpha2.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { + out.Model = in.Model + out.SerialNumber = in.SerialNumber + out.FirmwareVersion = in.FirmwareVersion + out.Slot = in.Slot + out.Capacity = in.Capacity + out.WearLevel = in.WearLevel + if err := Convert_v1alpha2_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfDriveStatus_To_v1alpha1_NnfDriveStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in *v1alpha2.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha2.NnfLustreMGT, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfLustreMGT_To_v1alpha2_NnfLustreMGT is an autogenerated conversion function. +func Convert_v1alpha1_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha2.NnfLustreMGT, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in, out, s) +} + +func autoConvert_v1alpha2_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in *v1alpha2.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfLustreMGT_To_v1alpha1_NnfLustreMGT is an autogenerated conversion function. +func Convert_v1alpha2_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in *v1alpha2.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in, out, s) +} + +func autoConvert_v1alpha1_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha2.NnfLustreMGTList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfLustreMGT)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha2.NnfLustreMGTList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in, out, s) +} + +func autoConvert_v1alpha2_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in *v1alpha2.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfLustreMGT)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList is an autogenerated conversion function. +func Convert_v1alpha2_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in *v1alpha2.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in, out, s) +} + +func autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha2.NnfLustreMGTSpec, s conversion.Scope) error { + out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) + out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) + out.FsNameStart = in.FsNameStart + out.FsNameStartReference = in.FsNameStartReference + out.ClaimList = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.ClaimList)) + return nil +} + +// Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha2.NnfLustreMGTSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha2.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { + out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) + out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) + out.FsNameStart = in.FsNameStart + out.FsNameStartReference = in.FsNameStartReference + out.ClaimList = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.ClaimList)) + return nil +} + +// Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha2.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha2.NnfLustreMGTStatus, s conversion.Scope) error { + out.FsNameNext = in.FsNameNext + out.ClaimList = *(*[]v1alpha2.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha2.NnfLustreMGTStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha2.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { + out.FsNameNext = in.FsNameNext + out.ClaimList = *(*[]NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha2.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha2.NnfLustreMGTStatusClaim, s conversion.Scope) error { + out.Reference = in.Reference + out.FsName = in.FsName + return nil +} + +// Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim is an autogenerated conversion function. +func Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha2.NnfLustreMGTStatusClaim, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in, out, s) +} + +func autoConvert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in *v1alpha2.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { + out.Reference = in.Reference + out.FsName = in.FsName + return nil +} + +// Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim is an autogenerated conversion function. +func Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in *v1alpha2.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in, out, s) +} + +func autoConvert_v1alpha1_NnfNode_To_v1alpha2_NnfNode(in *NnfNode, out *v1alpha2.NnfNode, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfNode_To_v1alpha2_NnfNode is an autogenerated conversion function. +func Convert_v1alpha1_NnfNode_To_v1alpha2_NnfNode(in *NnfNode, out *v1alpha2.NnfNode, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNode_To_v1alpha2_NnfNode(in, out, s) +} + +func autoConvert_v1alpha2_NnfNode_To_v1alpha1_NnfNode(in *v1alpha2.NnfNode, out *NnfNode, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfNode_To_v1alpha1_NnfNode is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNode_To_v1alpha1_NnfNode(in *v1alpha2.NnfNode, out *NnfNode, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNode_To_v1alpha1_NnfNode(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha2.NnfNodeBlockStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha2.NnfNodeBlockStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in *v1alpha2.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in *v1alpha2.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha2.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) + out.StorageGroupId = in.StorageGroupId + return nil +} + +// Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha2.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in *v1alpha2.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) + out.StorageGroupId = in.StorageGroupId + return nil +} + +// Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. 
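The same converters are also reachable through a `runtime.Scheme`, which is how envtest-style code can exercise them without going through a webhook. A hedged sketch, assuming the generated `RegisterConversions` is hooked into `AddToScheme` via the package's `localSchemeBuilder` init, which is the usual conversion-gen convention:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is assumed to also run the generated RegisterConversions
	// through the package's localSchemeBuilder init.
	if err := nnfv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	if err := nnfv1alpha2.AddToScheme(scheme); err != nil {
		panic(err)
	}

	old := &nnfv1alpha1.NnfNodeBlockStorage{}
	old.Spec.SharedAllocation = true

	converted := &nnfv1alpha2.NnfNodeBlockStorage{}
	// Scheme.Convert dispatches to the registered
	// Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage.
	if err := scheme.Convert(old, converted, nil); err != nil {
		panic(err)
	}
	fmt.Println(converted.Spec.SharedAllocation) // true
}
```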
+func Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in *v1alpha2.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha2.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + out.Capacity = in.Capacity + out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) + return nil +} + +// Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha2.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in *v1alpha2.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + out.Capacity = in.Capacity + out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) + return nil +} + +// Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in *v1alpha2.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha2.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + out.Accesses = *(*map[string]v1alpha2.NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) + out.Devices = *(*[]v1alpha2.NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) + out.CapacityAllocated = in.CapacityAllocated + out.StoragePoolId = in.StoragePoolId + return nil +} + +// Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha2.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in *v1alpha2.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + out.Accesses = *(*map[string]NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) + out.Devices = *(*[]NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) + out.CapacityAllocated = in.CapacityAllocated + out.StoragePoolId = in.StoragePoolId + return nil +} + +// Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in *v1alpha2.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha2.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + out.NQN = in.NQN + out.NamespaceId = in.NamespaceId + out.CapacityAllocated = in.CapacityAllocated + return nil +} + +// Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha2.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in *v1alpha2.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + out.NQN = in.NQN + out.NamespaceId = in.NamespaceId + out.CapacityAllocated = in.CapacityAllocated + return nil +} + +// Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in *v1alpha2.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha2.NnfNodeBlockStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha2.NnfNodeBlockStorageList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in *v1alpha2.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in *v1alpha2.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha2.NnfNodeBlockStorageSpec, s conversion.Scope) error { + out.SharedAllocation = in.SharedAllocation + out.Allocations = *(*[]v1alpha2.NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) + return nil +} + +// Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha2.NnfNodeBlockStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in *v1alpha2.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { + out.SharedAllocation = in.SharedAllocation + out.Allocations = *(*[]NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) + return nil +} + +// Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in *v1alpha2.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha2.NnfNodeBlockStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha2.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.ResourceError = in.ResourceError + out.PodStartTime = in.PodStartTime + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha2.NnfNodeBlockStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha2.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.ResourceError = in.ResourceError + out.PodStartTime = in.PodStartTime + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha2.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeECData_To_v1alpha2_NnfNodeECData(in *NnfNodeECData, out *v1alpha2.NnfNodeECData, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfNodeECData_To_v1alpha2_NnfNodeECData is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeECData_To_v1alpha2_NnfNodeECData(in *NnfNodeECData, out *v1alpha2.NnfNodeECData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeECData_To_v1alpha2_NnfNodeECData(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeECData_To_v1alpha1_NnfNodeECData(in *v1alpha2.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfNodeECData_To_v1alpha1_NnfNodeECData is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeECData_To_v1alpha1_NnfNodeECData(in *v1alpha2.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeECData_To_v1alpha1_NnfNodeECData(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha2.NnfNodeECDataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfNodeECData)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha2.NnfNodeECDataList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in *v1alpha2.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfNodeECData)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in *v1alpha2.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha2.NnfNodeECDataSpec, s conversion.Scope) error { + return nil +} + +// Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha2.NnfNodeECDataSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in *v1alpha2.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { + return nil +} + +// Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in *v1alpha2.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha2.NnfNodeECDataStatus, s conversion.Scope) error { + out.Data = *(*map[string]v1alpha2.NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha2.NnfNodeECDataStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in *v1alpha2.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { + out.Data = *(*map[string]NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in *v1alpha2.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeList_To_v1alpha2_NnfNodeList(in *NnfNodeList, out *v1alpha2.NnfNodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfNode)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfNodeList_To_v1alpha2_NnfNodeList is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeList_To_v1alpha2_NnfNodeList(in *NnfNodeList, out *v1alpha2.NnfNodeList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeList_To_v1alpha2_NnfNodeList(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeList_To_v1alpha1_NnfNodeList(in *v1alpha2.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfNode)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfNodeList_To_v1alpha1_NnfNodeList is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeList_To_v1alpha1_NnfNodeList(in *v1alpha2.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeList_To_v1alpha1_NnfNodeList(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha2.NnfNodeSpec, s conversion.Scope) error { + out.Name = in.Name + out.Pod = in.Pod + out.State = v1alpha2.NnfResourceStateType(in.State) + return nil +} + +// Convert_v1alpha1_NnfNodeSpec_To_v1alpha2_NnfNodeSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha2.NnfNodeSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in *v1alpha2.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { + out.Name = in.Name + out.Pod = in.Pod + out.State = NnfResourceStateType(in.State) + return nil +} + +// Convert_v1alpha2_NnfNodeSpec_To_v1alpha1_NnfNodeSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in *v1alpha2.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha2.NnfNodeStatus, s conversion.Scope) error { + out.Status = v1alpha2.NnfResourceStatusType(in.Status) + out.Health = v1alpha2.NnfResourceHealthType(in.Health) + out.Fenced = in.Fenced + out.LNetNid = in.LNetNid + out.Capacity = in.Capacity + out.CapacityAllocated = in.CapacityAllocated + out.Servers = *(*[]v1alpha2.NnfServerStatus)(unsafe.Pointer(&in.Servers)) + out.Drives = *(*[]v1alpha2.NnfDriveStatus)(unsafe.Pointer(&in.Drives)) + return nil +} + +// Convert_v1alpha1_NnfNodeStatus_To_v1alpha2_NnfNodeStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha2.NnfNodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha2.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { + out.Status = NnfResourceStatusType(in.Status) + out.Health = NnfResourceHealthType(in.Health) + out.Fenced = in.Fenced + out.LNetNid = in.LNetNid + out.Capacity = in.Capacity + out.CapacityAllocated = in.CapacityAllocated + out.Servers = *(*[]NnfServerStatus)(unsafe.Pointer(&in.Servers)) + out.Drives = *(*[]NnfDriveStatus)(unsafe.Pointer(&in.Drives)) + return nil +} + +// Convert_v1alpha2_NnfNodeStatus_To_v1alpha1_NnfNodeStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha2.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha2.NnfNodeStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfNodeStorage_To_v1alpha2_NnfNodeStorage is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha2.NnfNodeStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in *v1alpha2.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfNodeStorage_To_v1alpha1_NnfNodeStorage is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in *v1alpha2.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha2.NnfNodeStorageAllocationStatus, s conversion.Scope) error { + out.VolumeGroup = in.VolumeGroup + out.LogicalVolume = in.LogicalVolume + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha2.NnfNodeStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in *v1alpha2.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { + out.VolumeGroup = in.VolumeGroup + out.LogicalVolume = in.LogicalVolume + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in *v1alpha2.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha2.NnfNodeStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfNodeStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha2.NnfNodeStorageList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in *v1alpha2.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfNodeStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in *v1alpha2.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha2.NnfNodeStorageSpec, s conversion.Scope) error { + out.Count = in.Count + out.SharedAllocation = in.SharedAllocation + out.Capacity = in.Capacity + out.UserID = in.UserID + out.GroupID = in.GroupID + out.FileSystemType = in.FileSystemType + if err := Convert_v1alpha1_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { + return err + } + out.BlockReference = in.BlockReference + return nil +} + +// Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha2.NnfNodeStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1alpha2.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { + out.Count = in.Count + out.SharedAllocation = in.SharedAllocation + out.Capacity = in.Capacity + out.UserID = in.UserID + out.GroupID = in.GroupID + out.FileSystemType = in.FileSystemType + if err := Convert_v1alpha2_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { + return err + } + out.BlockReference = in.BlockReference + return nil +} + +// Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1alpha2.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha2.NnfNodeStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha2.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha2.NnfNodeStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha2.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha2.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfPortManager_To_v1alpha2_NnfPortManager(in *NnfPortManager, out *v1alpha2.NnfPortManager, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfPortManager_To_v1alpha2_NnfPortManager is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManager_To_v1alpha2_NnfPortManager(in *NnfPortManager, out *v1alpha2.NnfPortManager, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManager_To_v1alpha2_NnfPortManager(in, out, s) +} + +func autoConvert_v1alpha2_NnfPortManager_To_v1alpha1_NnfPortManager(in *v1alpha2.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfPortManager_To_v1alpha1_NnfPortManager is an autogenerated conversion function. +func Convert_v1alpha2_NnfPortManager_To_v1alpha1_NnfPortManager(in *v1alpha2.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManager_To_v1alpha1_NnfPortManager(in, out, s) +} + +func autoConvert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha2.NnfPortManagerAllocationSpec, s conversion.Scope) error { + out.Requester = in.Requester + out.Count = in.Count + return nil +} + +// Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha2.NnfPortManagerAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in *v1alpha2.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { + out.Requester = in.Requester + out.Count = in.Count + return nil +} + +// Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in *v1alpha2.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha2.NnfPortManagerAllocationStatus, s conversion.Scope) error { + out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) + out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) + out.Status = v1alpha2.NnfPortManagerAllocationStatusStatus(in.Status) + out.TimeUnallocated = (*metav1.Time)(unsafe.Pointer(in.TimeUnallocated)) + return nil +} + +// Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha2.NnfPortManagerAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in *v1alpha2.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { + out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) + out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) + out.Status = NnfPortManagerAllocationStatusStatus(in.Status) + out.TimeUnallocated = (*metav1.Time)(unsafe.Pointer(in.TimeUnallocated)) + return nil +} + +// Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in *v1alpha2.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha2.NnfPortManagerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfPortManager)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfPortManagerList_To_v1alpha2_NnfPortManagerList is an autogenerated conversion function. +func Convert_v1alpha1_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha2.NnfPortManagerList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in, out, s) +} + +func autoConvert_v1alpha2_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in *v1alpha2.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfPortManager)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfPortManagerList_To_v1alpha1_NnfPortManagerList is an autogenerated conversion function. +func Convert_v1alpha2_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in *v1alpha2.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in, out, s) +} + +func autoConvert_v1alpha1_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha2.NnfPortManagerSpec, s conversion.Scope) error { + out.SystemConfiguration = in.SystemConfiguration + out.Allocations = *(*[]v1alpha2.NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) + return nil +} + +// Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha2.NnfPortManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in *v1alpha2.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { + out.SystemConfiguration = in.SystemConfiguration + out.Allocations = *(*[]NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) + return nil +} + +// Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in *v1alpha2.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha2.NnfPortManagerStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha2.NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Status = v1alpha2.NnfPortManagerStatusStatus(in.Status) + return nil +} + +// Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha2.NnfPortManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in *v1alpha2.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { + out.Allocations = *(*[]NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Status = NnfPortManagerStatusStatus(in.Status) + return nil +} + +// Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in *v1alpha2.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha2.NnfResourceStatus, s conversion.Scope) error { + out.ID = in.ID + out.Name = in.Name + out.Status = v1alpha2.NnfResourceStatusType(in.Status) + out.Health = v1alpha2.NnfResourceHealthType(in.Health) + return nil +} + +// Convert_v1alpha1_NnfResourceStatus_To_v1alpha2_NnfResourceStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha2.NnfResourceStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1alpha2.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { + out.ID = in.ID + out.Name = in.Name + out.Status = NnfResourceStatusType(in.Status) + out.Health = NnfResourceHealthType(in.Health) + return nil +} + +// Convert_v1alpha2_NnfResourceStatus_To_v1alpha1_NnfResourceStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1alpha2.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfServerStatus_To_v1alpha2_NnfServerStatus(in *NnfServerStatus, out *v1alpha2.NnfServerStatus, s conversion.Scope) error { + out.Hostname = in.Hostname + if err := Convert_v1alpha1_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfServerStatus_To_v1alpha2_NnfServerStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfServerStatus_To_v1alpha2_NnfServerStatus(in *NnfServerStatus, out *v1alpha2.NnfServerStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfServerStatus_To_v1alpha2_NnfServerStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfServerStatus_To_v1alpha1_NnfServerStatus(in *v1alpha2.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { + out.Hostname = in.Hostname + if err := Convert_v1alpha2_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfServerStatus_To_v1alpha1_NnfServerStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfServerStatus_To_v1alpha1_NnfServerStatus(in *v1alpha2.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfServerStatus_To_v1alpha1_NnfServerStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorage_To_v1alpha2_NnfStorage(in *NnfStorage, out *v1alpha2.NnfStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfStorage_To_v1alpha2_NnfStorage is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorage_To_v1alpha2_NnfStorage(in *NnfStorage, out *v1alpha2.NnfStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorage_To_v1alpha2_NnfStorage(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorage_To_v1alpha1_NnfStorage(in *v1alpha2.NnfStorage, out *NnfStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfStorage_To_v1alpha1_NnfStorage is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorage_To_v1alpha1_NnfStorage(in *v1alpha2.NnfStorage, out *NnfStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorage_To_v1alpha1_NnfStorage(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha2.NnfStorageAllocationNodes, s conversion.Scope) error { + out.Name = in.Name + out.Count = in.Count + return nil +} + +// Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha2.NnfStorageAllocationNodes, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in *v1alpha2.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { + out.Name = in.Name + out.Count = in.Count + return nil +} + +// Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in *v1alpha2.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha2.NnfStorageAllocationSetSpec, s conversion.Scope) error { + out.Name = in.Name + out.Capacity = in.Capacity + if err := Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { + return err + } + out.SharedAllocation = in.SharedAllocation + out.Nodes = *(*[]v1alpha2.NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) + return nil +} + +// Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha2.NnfStorageAllocationSetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in *v1alpha2.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { + out.Name = in.Name + out.Capacity = in.Capacity + if err := Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { + return err + } + out.SharedAllocation = in.SharedAllocation + out.Nodes = *(*[]NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) + return nil +} + +// Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in *v1alpha2.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha2.NnfStorageAllocationSetStatus, s conversion.Scope) error { + out.Ready = in.Ready + out.AllocationCount = in.AllocationCount + return nil +} + +// Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha2.NnfStorageAllocationSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in *v1alpha2.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { + out.Ready = in.Ready + out.AllocationCount = in.AllocationCount + return nil +} + +// Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in *v1alpha2.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageList_To_v1alpha2_NnfStorageList(in *NnfStorageList, out *v1alpha2.NnfStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfStorageList_To_v1alpha2_NnfStorageList is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageList_To_v1alpha2_NnfStorageList(in *NnfStorageList, out *v1alpha2.NnfStorageList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageList_To_v1alpha2_NnfStorageList(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageList_To_v1alpha1_NnfStorageList(in *v1alpha2.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfStorageList_To_v1alpha1_NnfStorageList is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageList_To_v1alpha1_NnfStorageList(in *v1alpha2.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageList_To_v1alpha1_NnfStorageList(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha2.NnfStorageLustreSpec, s conversion.Scope) error { + out.TargetType = in.TargetType + out.BackFs = in.BackFs + out.MgsAddress = in.MgsAddress + out.PersistentMgsReference = in.PersistentMgsReference + return nil +} + +// Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha2.NnfStorageLustreSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in *v1alpha2.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { + out.TargetType = in.TargetType + out.BackFs = in.BackFs + out.MgsAddress = in.MgsAddress + out.PersistentMgsReference = in.PersistentMgsReference + return nil +} + +// Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in *v1alpha2.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha2.NnfStorageLustreStatus, s conversion.Scope) error { + out.MgsAddress = in.MgsAddress + out.FileSystemName = in.FileSystemName + out.LustreMgtReference = in.LustreMgtReference + return nil +} + +// Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha2.NnfStorageLustreStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in *v1alpha2.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { + out.MgsAddress = in.MgsAddress + out.FileSystemName = in.FileSystemName + out.LustreMgtReference = in.LustreMgtReference + return nil +} + +// Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in *v1alpha2.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha2.NnfStorageProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfStorageProfile_To_v1alpha2_NnfStorageProfile is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha2.NnfStorageProfile, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in *v1alpha2.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfStorageProfile_To_v1alpha1_NnfStorageProfile is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in *v1alpha2.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha2.NnfStorageProfileCmdLines, s conversion.Scope) error { + out.Mkfs = in.Mkfs + out.SharedVg = in.SharedVg + out.PvCreate = in.PvCreate + out.PvRemove = in.PvRemove + out.VgCreate = in.VgCreate + if err := Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { + return err + } + out.VgRemove = in.VgRemove + out.LvCreate = in.LvCreate + if err := Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { + return err + } + out.LvRemove = in.LvRemove + out.MountRabbit = in.MountRabbit + out.MountCompute = in.MountCompute + return nil +} + +// Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha2.NnfStorageProfileCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in *v1alpha2.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { + out.Mkfs = in.Mkfs + out.SharedVg = in.SharedVg + out.PvCreate = in.PvCreate + out.PvRemove = in.PvRemove + out.VgCreate = in.VgCreate + if err := Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { + return err + } + out.VgRemove = in.VgRemove + out.LvCreate = in.LvCreate + if err := Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { + return err + } + out.LvRemove = in.LvRemove + out.MountRabbit = in.MountRabbit + out.MountCompute = in.MountCompute + return nil +} + +// Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in *v1alpha2.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha2.NnfStorageProfileData, s conversion.Scope) error { + out.Default = in.Default + out.Pinned = in.Pinned + if err := Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha2.NnfStorageProfileData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in *v1alpha2.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { + out.Default = in.Default + out.Pinned = in.Pinned + if err := Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in *v1alpha2.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha2.NnfStorageProfileGFS2Data, s conversion.Scope) error { + if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha2.NnfStorageProfileGFS2Data, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in *v1alpha2.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { + if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in *v1alpha2.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha2.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + out.Activate = in.Activate + out.Deactivate = in.Deactivate + return nil +} + +// Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha2.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha2.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + out.Activate = in.Activate + out.Deactivate = in.Deactivate + return nil +} + +// Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha2.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha2.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + out.LockStart = in.LockStart + out.LockStop = in.LockStop + return nil +} + +// Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha2.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha2.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + out.LockStart = in.LockStart + out.LockStop = in.LockStop + return nil +} + +// Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha2.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha2.NnfStorageProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.NnfStorageProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha2.NnfStorageProfileList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in *v1alpha2.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfStorageProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in *v1alpha2.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha2.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + out.ZpoolCreate = in.ZpoolCreate + out.Mkfs = in.Mkfs + out.MountTarget = in.MountTarget + return nil +} + +// Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha2.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in *v1alpha2.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + out.ZpoolCreate = in.ZpoolCreate + out.Mkfs = in.Mkfs + out.MountTarget = in.MountTarget + return nil +} + +// Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in *v1alpha2.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha2.NnfStorageProfileLustreData, s conversion.Scope) error { + out.CombinedMGTMDT = in.CombinedMGTMDT + out.ExternalMGS = in.ExternalMGS + out.CapacityMGT = in.CapacityMGT + out.CapacityMDT = in.CapacityMDT + out.ExclusiveMDT = in.ExclusiveMDT + out.CapacityScalingFactor = in.CapacityScalingFactor + out.StandaloneMGTPoolName = in.StandaloneMGTPoolName + if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { + return err + } + out.MountRabbit = in.MountRabbit + out.MountCompute = in.MountCompute + return nil +} + +// Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha2.NnfStorageProfileLustreData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in *v1alpha2.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { + out.CombinedMGTMDT = in.CombinedMGTMDT + out.ExternalMGS = in.ExternalMGS + out.CapacityMGT = in.CapacityMGT + out.CapacityMDT = in.CapacityMDT + out.ExclusiveMDT = in.ExclusiveMDT + out.CapacityScalingFactor = in.CapacityScalingFactor + out.StandaloneMGTPoolName = in.StandaloneMGTPoolName + if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { + return err + } + out.MountRabbit = in.MountRabbit + out.MountCompute = in.MountCompute + return nil +} + +// Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in *v1alpha2.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha2.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + out.ColocateComputes = in.ColocateComputes + out.Count = in.Count + out.Scale = in.Scale + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + return nil +} + +// Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha2.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in *v1alpha2.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + out.ColocateComputes = in.ColocateComputes + out.Count = in.Count + out.Scale = in.Scale + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + return nil +} + +// Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in *v1alpha2.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha2.NnfStorageProfileRawData, s conversion.Scope) error { + if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha2.NnfStorageProfileRawData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in *v1alpha2.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { + if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in *v1alpha2.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha2.NnfStorageProfileXFSData, s conversion.Scope) error { + if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha2.NnfStorageProfileXFSData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in *v1alpha2.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { + if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in *v1alpha2.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha2.NnfStorageSpec, s conversion.Scope) error { + out.FileSystemType = in.FileSystemType + out.UserID = in.UserID + out.GroupID = in.GroupID + out.AllocationSets = *(*[]v1alpha2.NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) + return nil +} + +// Convert_v1alpha1_NnfStorageSpec_To_v1alpha2_NnfStorageSpec is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha2.NnfStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha2.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { + out.FileSystemType = in.FileSystemType + out.UserID = in.UserID + out.GroupID = in.GroupID + out.AllocationSets = *(*[]NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) + return nil +} + +// Convert_v1alpha2_NnfStorageSpec_To_v1alpha1_NnfStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha2.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in, out, s) +} + +func autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha2.NnfStorageStatus, s conversion.Scope) error { + if err := Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { + return err + } + out.AllocationSets = *(*[]v1alpha2.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) + out.ResourceError = in.ResourceError + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha1_NnfStorageStatus_To_v1alpha2_NnfStorageStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha2.NnfStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1alpha2.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { + if err := Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { + return err + } + out.AllocationSets = *(*[]NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) + out.ResourceError = in.ResourceError + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha2_NnfStorageStatus_To_v1alpha1_NnfStorageStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1alpha2.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in, out, s) +} + +func autoConvert_v1alpha1_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha2.NnfSystemStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_NnfSystemStorage_To_v1alpha2_NnfSystemStorage is an autogenerated conversion function. +func Convert_v1alpha1_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha2.NnfSystemStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in, out, s) +} + +func autoConvert_v1alpha2_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in *v1alpha2.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_NnfSystemStorage_To_v1alpha1_NnfSystemStorage is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in *v1alpha2.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in, out, s) +} + +func autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha2.NnfSystemStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha2.NnfSystemStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList is an autogenerated conversion function. +func Convert_v1alpha1_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha2.NnfSystemStorageList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in, out, s) +} + +func autoConvert_v1alpha2_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in *v1alpha2.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfSystemStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha2_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha2_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList is an autogenerated conversion function. +func Convert_v1alpha2_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in *v1alpha2.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in, out, s) +} + +func autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha2.NnfSystemStorageSpec, s conversion.Scope) error { + out.SystemConfiguration = in.SystemConfiguration + out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) + out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) + out.ExcludeComputes = *(*[]string)(unsafe.Pointer(&in.ExcludeComputes)) + out.IncludeComputes = *(*[]string)(unsafe.Pointer(&in.IncludeComputes)) + out.ComputesTarget = v1alpha2.NnfSystemStorageComputesTarget(in.ComputesTarget) + out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) + out.Capacity = in.Capacity + out.Type = in.Type + out.StorageProfile = in.StorageProfile + out.MakeClientMounts = in.MakeClientMounts + out.ClientMountPath = in.ClientMountPath + return nil +} + +// Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha2.NnfSystemStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in, out, s) +} + +func autoConvert_v1alpha2_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in *v1alpha2.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { + out.SystemConfiguration = in.SystemConfiguration + out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) + out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) + // WARNING: in.ExcludeDisabledRabbits requires manual conversion: does not exist in peer-type + out.ExcludeComputes = *(*[]string)(unsafe.Pointer(&in.ExcludeComputes)) + out.IncludeComputes = *(*[]string)(unsafe.Pointer(&in.IncludeComputes)) + out.ComputesTarget = NnfSystemStorageComputesTarget(in.ComputesTarget) + out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) + out.Capacity = in.Capacity + out.Type = in.Type + out.StorageProfile = in.StorageProfile + out.MakeClientMounts = in.MakeClientMounts + out.ClientMountPath = in.ClientMountPath + return nil +} + +func autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha2.NnfSystemStorageStatus, s conversion.Scope) error { + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha2.NnfSystemStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in, out, s) +} + +func autoConvert_v1alpha2_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha2.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha2.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in, out, s) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 045c2cb58..de055172b 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -26,7 +26,7 @@ package v1alpha1 import ( "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -2020,25 +2020,3 @@ func (in *NnfSystemStorageStatus) DeepCopy() *NnfSystemStorageStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VarHandler) DeepCopyInto(out *VarHandler) { - *out = *in - if in.VarMap != nil { - in, out := &in.VarMap, &out.VarMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VarHandler. -func (in *VarHandler) DeepCopy() *VarHandler { - if in == nil { - return nil - } - out := new(VarHandler) - in.DeepCopyInto(out) - return out -} diff --git a/api/v1alpha2/conversion.go b/api/v1alpha2/conversion.go new file mode 100644 index 000000000..b9220489f --- /dev/null +++ b/api/v1alpha2/conversion.go @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +func (*NnfAccess) Hub() {} +func (*NnfContainerProfile) Hub() {} +func (*NnfDataMovement) Hub() {} +func (*NnfDataMovementManager) Hub() {} +func (*NnfDataMovementProfile) Hub() {} +func (*NnfLustreMGT) Hub() {} +func (*NnfNode) Hub() {} +func (*NnfNodeBlockStorage) Hub() {} +func (*NnfNodeECData) Hub() {} +func (*NnfNodeStorage) Hub() {} +func (*NnfPortManager) Hub() {} +func (*NnfStorage) Hub() {} +func (*NnfStorageProfile) Hub() {} +func (*NnfSystemStorage) Hub() {} + +// The conversion-verifier tool wants these...though they're never used. +func (*NnfAccessList) Hub() {} +func (*NnfContainerProfileList) Hub() {} +func (*NnfDataMovementList) Hub() {} +func (*NnfDataMovementManagerList) Hub() {} +func (*NnfDataMovementProfileList) Hub() {} +func (*NnfLustreMGTList) Hub() {} +func (*NnfNodeList) Hub() {} +func (*NnfNodeBlockStorageList) Hub() {} +func (*NnfNodeECDataList) Hub() {} +func (*NnfNodeStorageList) Hub() {} +func (*NnfPortManagerList) Hub() {} +func (*NnfStorageList) Hub() {} +func (*NnfStorageProfileList) Hub() {} +func (*NnfSystemStorageList) Hub() {} diff --git a/api/v1alpha2/groupversion_info.go b/api/v1alpha2/groupversion_info.go new file mode 100644 index 000000000..a49f98a06 --- /dev/null +++ b/api/v1alpha2/groupversion_info.go @@ -0,0 +1,39 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package v1alpha2 contains API Schema definitions for the nnf v1alpha2 API group +// +kubebuilder:object:generate=true +// +groupName=nnf.cray.hpe.com +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "nnf.cray.hpe.com", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha2/nnf_resource_condition_types.go b/api/v1alpha2/nnf_resource_condition_types.go new file mode 100644 index 000000000..529c7c866 --- /dev/null +++ b/api/v1alpha2/nnf_resource_condition_types.go @@ -0,0 +1,115 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Types define the condition type that is recorded by the system. Each storage resource +// defines an array of conditions as state transitions. Entry into and out of the state +// is recorded by the metav1.ConditionStatus. Order must be preserved and consistent between +// the Index and string values. 
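+//
+// A hypothetical usage sketch of the parallel index/string constants below
+// (illustrative only, not part of the API):
+//
+//	c := NewConditions()
+//	c[ConditionIndexCreateFileSystem].Status = metav1.ConditionTrue
+//	c[ConditionIndexCreateFileSystem].Reason = ConditionSuccess
+//	c[ConditionIndexCreateFileSystem].LastTransitionTime = metav1.Now()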
+const (
+	ConditionIndexCreateStoragePool = iota
+	ConditionIndexDeleteStoragePool
+	ConditionIndexCreateStorageGroup
+	ConditionIndexCreateFileSystem
+	ConditionIndexCreateFileShare
+	ConditionIndexGetResource
+	ConditionIndexInvalidResource
+	// INSERT NEW ITEMS HERE - Ensure Condition string is at same index
+
+	numConditions
+
+	ConditionCreateStoragePool  = "CreateStoragePool"
+	ConditionDeleteStoragePool  = "DeleteStoragePool"
+	ConditionCreateStorageGroup = "CreateStorageGroup"
+	ConditionCreateFileSystem   = "CreateFileSystem"
+	ConditionCreateFileShare    = "CreateFileShare"
+	ConditionGetResource        = "GetResource"
+	ConditionInvalidResource    = "InvalidResource"
+	// INSERT NEW ITEMS HERE - Ensure NewConditions() is updated to contain item and correct ordering
+)
+
+// NewConditions generates a new conditions array for NNFNodeStorage
+func NewConditions() []metav1.Condition {
+
+	types := []string{
+		ConditionCreateStoragePool,
+		ConditionDeleteStoragePool,
+		ConditionCreateStorageGroup,
+		ConditionCreateFileSystem,
+		ConditionCreateFileShare,
+		ConditionGetResource,
+		ConditionInvalidResource,
+	}
+
+	if numConditions != len(types) {
+		panic("Did you forget to include the condition in the types array?")
+	}
+
+	c := make([]metav1.Condition, len(types))
+	for idx := range c {
+		c[idx] = metav1.Condition{
+			Type:               types[idx],
+			Status:             metav1.ConditionUnknown,
+			Reason:             ConditionUnknown,
+			LastTransitionTime: metav1.Now(),
+		}
+	}
+
+	c[ConditionIndexCreateStoragePool].Status = metav1.ConditionTrue
+	c[ConditionIndexCreateStoragePool].LastTransitionTime = metav1.Now()
+
+	return c
+
+}
+
+// SetGetResourceFailureCondition sets the specified condition to failed
+func SetGetResourceFailureCondition(c []metav1.Condition, err error) {
+	c[ConditionIndexGetResource] = metav1.Condition{
+		Type:               ConditionGetResource,
+		Reason:             ConditionFailed,
+		Status:             metav1.ConditionTrue,
+		Message:            err.Error(),
+		LastTransitionTime: metav1.Now(),
+	}
+}
+
+// SetResourceInvalidCondition sets the specified condition to invalid
+func SetResourceInvalidCondition(c []metav1.Condition, err error) {
+	c[ConditionIndexInvalidResource] = metav1.Condition{
+		Type:               ConditionInvalidResource,
+		Reason:             ConditionInvalid,
+		Status:             metav1.ConditionTrue,
+		Message:            err.Error(),
+		LastTransitionTime: metav1.Now(),
+	}
+}
+
+// Reason implements the Reason field of a metav1.Condition. In accordance with the metav1.Condition,
+// the value should be a CamelCase string and may not be empty.
+const (
+	ConditionUnknown = "Unknown"
+	ConditionFailed  = "Failed"
+	ConditionInvalid = "Invalid"
+	ConditionSuccess = "Success"
+)
diff --git a/api/v1alpha2/nnf_resource_health_type.go b/api/v1alpha2/nnf_resource_health_type.go
new file mode 100644
index 000000000..562118e62
--- /dev/null
+++ b/api/v1alpha2/nnf_resource_health_type.go
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2021-2024 Hewlett Packard Enterprise Development LP
+ * Other additional copyright holders may be indicated within.
+ *
+ * The entirety of this work is licensed under the Apache License,
+ * Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License.
+ *
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha2
+
+import (
+	sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models"
+)
+
+// NnfResourceHealthType defines the health of an NNF resource.
+type NnfResourceHealthType string
+
+const (
+	// ResourceOkay is SF health OK
+	ResourceOkay NnfResourceHealthType = NnfResourceHealthType(sf.OK_RH)
+
+	// ResourceWarning is SF health WARNING
+	ResourceWarning = NnfResourceHealthType(sf.WARNING_RH)
+
+	// ResourceCritical is SF health CRITICAL
+	ResourceCritical = NnfResourceHealthType(sf.CRITICAL_RH)
+)
+
+// ResourceHealth maps an SF ResourceStatus to an NNFResourceHealthType
+func ResourceHealth(s sf.ResourceStatus) NnfResourceHealthType {
+	switch s.Health {
+	case sf.OK_RH:
+		return ResourceOkay
+	case sf.WARNING_RH:
+		return ResourceWarning
+	case sf.CRITICAL_RH:
+		return ResourceCritical
+	}
+
+	panic("Unknown Resource Health " + string(s.Health))
+}
+
+// UpdateIfWorseThan examines the input health type and updates the health if it is worse
+// than the stored value
+func (rht NnfResourceHealthType) UpdateIfWorseThan(health *NnfResourceHealthType) {
+	switch rht {
+	case ResourceWarning:
+		if *health == ResourceOkay {
+			*health = ResourceWarning
+		}
+	case ResourceCritical:
+		if *health != ResourceCritical {
+			*health = ResourceCritical
+		}
+	default:
+	}
+}
diff --git a/api/v1alpha2/nnf_resource_state_type.go b/api/v1alpha2/nnf_resource_state_type.go
new file mode 100644
index 000000000..d9dbc1ca7
--- /dev/null
+++ b/api/v1alpha2/nnf_resource_state_type.go
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2021-2024 Hewlett Packard Enterprise Development LP
+ * Other additional copyright holders may be indicated within.
+ *
+ * The entirety of this work is licensed under the Apache License,
+ * Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License.
+ *
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha2
+
+// NnfResourceStateType defines the valid states that a user can configure for an NNF resource
+type NnfResourceStateType string
+
+const (
+	//
+	// Below reflects the current status of a static resource
+	//
+
+	// ResourceEnable means this static NNF resource should be enabled.
+	ResourceEnable NnfResourceStateType = "Enable"
+
+	// ResourceDisable means this static NNF resource should be disabled. Not all static resources can be disabled.
+	ResourceDisable = "Disable"
+
+	//
+	// Below reflects the current status of a managed (user created) resource
+	//
+
+	// ResourceCreate means the resource should be created and enabled for operation. For a newly
+	// created resource, the default state is create.
+	ResourceCreate NnfResourceStateType = "Create"
+
+	// ResourceDestroy means the resource should be released from the allocated resource pool, and
+	// this resource and all child resources will be released to the free resource pools
+	// managed by the system.
+	ResourceDestroy = "Destroy"
+)
diff --git a/api/v1alpha2/nnf_resource_status_type.go b/api/v1alpha2/nnf_resource_status_type.go
new file mode 100644
index 000000000..23c76cbe2
--- /dev/null
+++ b/api/v1alpha2/nnf_resource_status_type.go
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2021-2024 Hewlett Packard Enterprise Development LP
+ * Other additional copyright holders may be indicated within.
+ *
+ * The entirety of this work is licensed under the Apache License,
+ * Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License.
+ *
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha2
+
+import (
+	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
+
+	sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models"
+)
+
+// NnfResourceStatusType is the string that indicates the resource's status
+type NnfResourceStatusType string
+
+const (
+	//
+	// Below reflects the current status of a static resource
+	//
+
+	// ResourceEnabled means the static NNF resource is enabled and ready to fulfill requests for
+	// managed resources.
+	ResourceEnabled NnfResourceStatusType = NnfResourceStatusType(sf.ENABLED_RST)
+
+	// ResourceDisabled means the static NNF resource is present but disabled and not available for use
+	ResourceDisabled = NnfResourceStatusType(sf.DISABLED_RST)
+
+	// ResourceNotPresent means the static NNF resource is not found; likely because it is disconnected
+	// or in a powered down state.
+	ResourceNotPresent = "NotPresent"
+
+	// ResourceOffline means the static NNF resource is offline and the NNF Node cannot communicate with
+	// the resource. This differs from a NotPresent status in that the device is known to exist.
+	ResourceOffline = "Offline"
+
+	//
+	// Below reflects the current status of a managed (user created) resource
+	//
+
+	// ResourceStarting means the NNF resource is currently in the process of starting - resources
+	// are being prepared for transition to an Active state.
+	ResourceStarting = NnfResourceStatusType(sf.STARTING_RST)
+
+	// ResourceDeleting means the NNF resource is currently in the process of being deleted - the resource
+	// and all child resources are being returned to the NNF node's free resources. Upon a successful
+	// deletion, the resource will be removed from the list of managed NNF resources
+	ResourceDeleting = "Deleting"
+
+	// ResourceDeleted means the NNF resource was deleted. This reflects the state where the NNF resource does
+	// not exist in the NNF space, but the resource might still exist in Kubernetes. A resource in
+	// this state suggests that Kubernetes is unable to delete the object.
+	ResourceDeleted = "Deleted"
+
+	// ResourceReady means the NNF resource is ready for use.
+	ResourceReady = "Ready"
+
+	// ResourceFailed means the NNF resource has failed during startup or execution. A failed state is
+	// an unrecoverable condition. Additional information about the Failed cause can be found by
+	// looking at the owning resource's Conditions field. A failed resource can only be removed
+	// by transition to a Delete state.
+ ResourceFailed = "Failed" + + // ResourceInvalid means the NNF resource configuration is invalid due to an improper format or arrangement + // of listed resource parameters. + ResourceInvalid = "Invalid" +) + +// UpdateIfWorseThan updates the stored status of the resource if the new status is worse than what was stored +func (rst NnfResourceStatusType) UpdateIfWorseThan(status *NnfResourceStatusType) { + switch rst { + case ResourceStarting: + if *status == ResourceReady { + *status = ResourceStarting + } + case ResourceFailed: + if *status != ResourceFailed { + *status = ResourceFailed + } + default: + } +} + +func (rst NnfResourceStatusType) ConvertToDWSResourceStatus() dwsv1alpha2.ResourceStatus { + switch rst { + case ResourceStarting: + return dwsv1alpha2.StartingStatus + case ResourceReady: + return dwsv1alpha2.ReadyStatus + case ResourceDisabled: + return dwsv1alpha2.DisabledStatus + case ResourceNotPresent: + return dwsv1alpha2.NotPresentStatus + case ResourceOffline: + return dwsv1alpha2.OfflineStatus + case ResourceFailed: + return dwsv1alpha2.FailedStatus + default: + return dwsv1alpha2.UnknownStatus + } +} + +// StaticResourceStatus will convert a Swordfish ResourceStatus to the NNF Resource Status. +func StaticResourceStatus(s sf.ResourceStatus) NnfResourceStatusType { + switch s.State { + case sf.STARTING_RST: + return ResourceStarting + case sf.ENABLED_RST: + return ResourceReady + case sf.DISABLED_RST: + return ResourceDisabled + case sf.ABSENT_RST: + return ResourceNotPresent + case sf.UNAVAILABLE_OFFLINE_RST: + return ResourceOffline + } + + panic("Unknown Resource State " + string(s.State)) +} + +// ResourceStatus will convert a Swordfish ResourceStatus to the NNF Resource Status. +func ResourceStatus(s sf.ResourceStatus) NnfResourceStatusType { + switch s.State { + case sf.STARTING_RST: + return ResourceStarting + case sf.ENABLED_RST: + return ResourceReady + case sf.DISABLED_RST: + return ResourceDisabled + case sf.ABSENT_RST: + return ResourceNotPresent + case sf.UNAVAILABLE_OFFLINE_RST: + return ResourceOffline + + default: + return ResourceFailed + } +} diff --git a/api/v1alpha2/nnf_resource_type.go b/api/v1alpha2/nnf_resource_type.go new file mode 100644 index 000000000..bc0a7a8bd --- /dev/null +++ b/api/v1alpha2/nnf_resource_type.go @@ -0,0 +1,33 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +// NnfResourceStatus provides common fields that are included in all NNF Resources +type NnfResourceStatus struct { + // ID reflects the NNF Node unique identifier for this NNF Server resource. + ID string `json:"id,omitempty"` + + // Name reflects the common name of this NNF Server resource. 
+	Name string `json:"name,omitempty"`
+
+	Status NnfResourceStatusType `json:"status,omitempty"`
+
+	Health NnfResourceHealthType `json:"health,omitempty"`
+}
diff --git a/api/v1alpha2/nnfaccess_types.go b/api/v1alpha2/nnfaccess_types.go
new file mode 100644
index 000000000..258d8f5a1
--- /dev/null
+++ b/api/v1alpha2/nnfaccess_types.go
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2021-2024 Hewlett Packard Enterprise Development LP
+ * Other additional copyright holders may be indicated within.
+ *
+ * The entirety of this work is licensed under the Apache License,
+ * Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License.
+ *
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha2
+
+import (
+	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
+	"github.com/DataWorkflowServices/dws/utils/updater"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// NnfAccessSpec defines the desired state of NnfAccess
+type NnfAccessSpec struct {
+	// DesiredState is the desired state for the mounts on the client
+	// +kubebuilder:validation:Enum=mounted;unmounted
+	DesiredState string `json:"desiredState"`
+
+	// TeardownState is the desired state of the workflow for this NNF Access resource to
+	// be torn down and deleted.
+	// +kubebuilder:validation:Enum:=PreRun;PostRun;Teardown
+	// +kubebuilder:validation:Type:=string
+	TeardownState dwsv1alpha2.WorkflowState `json:"teardownState"`
+
+	// Target specifies which storage targets the client should mount
+	// - single: Only one of the storages the client can access
+	// - all: All of the storages the client can access
+	// - shared: Multiple clients access the same storage
+	// +kubebuilder:validation:Enum=single;all;shared
+	Target string `json:"target"`
+
+	// UserID for the new mount. Currently only used for raw
+	UserID uint32 `json:"userID"`
+
+	// GroupID for the new mount. Currently only used for raw
+	GroupID uint32 `json:"groupID"`
+
+	// ClientReference is for a client resource. (DWS) Computes is the only client
+	// resource type currently supported.
+	ClientReference corev1.ObjectReference `json:"clientReference,omitempty"`
+
+	// MountPath for the storage target on the client
+	MountPath string `json:"mountPath,omitempty"`
+
+	// MakeClientMounts determines whether the ClientMount resources are made, or if only
+	// the access list on the NnfNodeBlockStorage is updated
+	// +kubebuilder:default=true
+	MakeClientMounts bool `json:"makeClientMounts"`
+
+	// MountPathPrefix to mount the storage target on the client when there is
+	// more than one mount on a client
+	MountPathPrefix string `json:"mountPathPrefix,omitempty"`
+
+	// StorageReference is the NnfStorage reference
+	StorageReference corev1.ObjectReference `json:"storageReference"`
+}
+
+// NnfAccessStatus defines the observed state of NnfAccess
+type NnfAccessStatus struct {
+	// State is the current state
+	// +kubebuilder:validation:Enum=mounted;unmounted
+	State string `json:"state"`
+
+	// Ready signifies whether status.state has been achieved
+	Ready bool `json:"ready"`
+
+	dwsv1alpha2.ResourceError `json:",inline"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+// +kubebuilder:storageversion
+//+kubebuilder:printcolumn:name="DESIREDSTATE",type="string",JSONPath=".spec.desiredState",description="The desired state"
+//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="The current state"
+//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="Whether the state has been achieved"
+//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity"
+//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+
+// NnfAccess is the Schema for the nnfaccesses API
+type NnfAccess struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   NnfAccessSpec   `json:"spec,omitempty"`
+	Status NnfAccessStatus `json:"status,omitempty"`
+}
+
+func (a *NnfAccess) GetStatus() updater.Status[*NnfAccessStatus] {
+	return &a.Status
+}
+
+//+kubebuilder:object:root=true
+
+// NnfAccessList contains a list of NnfAccess
+type NnfAccessList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NnfAccess `json:"items"`
+}
+
+func (n *NnfAccessList) GetObjectList() []client.Object {
+	objectList := []client.Object{}
+
+	for i := range n.Items {
+		objectList = append(objectList, &n.Items[i])
+	}
+
+	return objectList
+}
+
+func init() {
+	SchemeBuilder.Register(&NnfAccess{}, &NnfAccessList{})
+}
diff --git a/api/v1alpha2/nnfaccess_webhook.go b/api/v1alpha2/nnfaccess_webhook.go
new file mode 100644
index 000000000..da2583e48
--- /dev/null
+++ b/api/v1alpha2/nnfaccess_webhook.go
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2024 Hewlett Packard Enterprise Development LP
+ * Other additional copyright holders may be indicated within.
+ *
+ * The entirety of this work is licensed under the Apache License,
+ * Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License.
+ *
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnfaccesslog = logf.Log.WithName("nnfaccess-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfAccess) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfaccess_webhook_test.go b/api/v1alpha2/nnfaccess_webhook_test.go new file mode 100644 index 000000000..ef0483acb --- /dev/null +++ b/api/v1alpha2/nnfaccess_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfAccess Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfAccess under Conversion Webhook", func() { + It("Should get the converted version of NnfAccess", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfcontainerprofile_types.go b/api/v1alpha2/nnfcontainerprofile_types.go new file mode 100644 index 000000000..e2a84dd89 --- /dev/null +++ b/api/v1alpha2/nnfcontainerprofile_types.go @@ -0,0 +1,141 @@ +/* + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package v1alpha2
+
+import (
+	mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	ContainerLabel   = "nnf.cray.hpe.com/container"
+	ContainerUser    = "user"
+	ContainerMPIUser = "mpiuser"
+)
+
+// NnfContainerProfileData defines the desired state of NnfContainerProfile
+type NnfContainerProfileData struct {
+	// Pinned is true if this instance is an immutable copy
+	// +kubebuilder:default:=false
+	Pinned bool `json:"pinned,omitempty"`
+
+	// List of possible filesystems supported by this container profile
+	Storages []NnfContainerProfileStorage `json:"storages,omitempty"`
+
+	// Containers are launched in the PreRun state. Allow this many seconds for the containers to
+	// start before declaring an error to the workflow.
+	// Defaults to 300 if not set. A value of 0 disables this behavior.
+	// +kubebuilder:default:=300
+	// +kubebuilder:validation:Minimum:=0
+	PreRunTimeoutSeconds *int64 `json:"preRunTimeoutSeconds,omitempty"`
+
+	// Containers are expected to complete in the PostRun State. Allow this many seconds for the
+	// containers to exit before declaring an error to the workflow.
+	// Defaults to 300 if not set. A value of 0 disables this behavior.
+	// +kubebuilder:default:=300
+	// +kubebuilder:validation:Minimum:=0
+	PostRunTimeoutSeconds *int64 `json:"postRunTimeoutSeconds,omitempty"`
+
+	// Specifies the number of times a container will be retried upon a failure. A new pod is
+	// deployed on each retry. Defaults to 6 by Kubernetes itself and must be set. A value of 0
+	// disables retries.
+	// +kubebuilder:validation:Minimum:=0
+	// +kubebuilder:default:=6
+	RetryLimit int32 `json:"retryLimit"`
+
+	// UserID specifies the user ID that is allowed to use this profile. If this is specified, only
+	// Workflows that have a matching user ID can select this profile.
+	UserID *uint32 `json:"userID,omitempty"`
+
+	// GroupID specifies the group ID that is allowed to use this profile. If this is specified,
+	// only Workflows that have a matching group ID can select this profile.
+	GroupID *uint32 `json:"groupID,omitempty"`
+
+	// Number of ports to open for communication with the user container. These ports are opened on
+	// the targeted NNF nodes and can be accessed outside of the k8s cluster (e.g. compute nodes).
+	// The requested ports are made available as environment variables inside the container and in
+	// the DWS workflow (NNF_CONTAINER_PORTS).
+	NumPorts int32 `json:"numPorts,omitempty"`
+
+	// Spec to define the containers created from this profile. This is used for non-MPI containers.
+	// Refer to the K8s documentation for `PodSpec` for more definition:
+	// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
+	// Either this or MPISpec must be provided, but not both.
+	Spec *corev1.PodSpec `json:"spec,omitempty"`
+
+	// MPIJobSpec to define the MPI containers created from this profile. This functionality is
+	// provided via mpi-operator, a 3rd party tool to assist in running MPI applications across
+	// worker containers.
+	// Either this or Spec must be provided, but not both.
+	//
+	// All the fields defined drive mpi-operator behavior. See the type definition of MPISpec for
+	// more detail:
+	// https://github.com/kubeflow/mpi-operator/blob/v0.4.0/pkg/apis/kubeflow/v2beta1/types.go#L137
+	//
+	// Note: most of these fields are fully customizable with a few exceptions. These fields are
+	// overridden by NNF software to ensure proper behavior when interfacing with the DWS workflow:
+	// - Replicas
+	// - RunPolicy.BackoffLimit (this is set above by `RetryLimit`)
+	// - Worker/Launcher.RestartPolicy
+	MPISpec *mpiv2beta1.MPIJobSpec `json:"mpiSpec,omitempty"`
+}
+
+// NnfContainerProfileStorage defines the mount point information that will be available to the
+// container
+type NnfContainerProfileStorage struct {
+	// Name specifies the name of the mounted filesystem; must match the user supplied #DW directive
+	Name string `json:"name"`
+
+	// Optional designates that this filesystem is available to be mounted, but can be ignored if
+	// the user does not supply this filesystem in the #DW directives
+	//+kubebuilder:default:=false
+	Optional bool `json:"optional"`
+
+	// For DW_GLOBAL_ (global lustre) storages, the access mode must match what is configured in
+	// the LustreFilesystem resource for the namespace. Defaults to `ReadWriteMany` for global
+	// lustre, otherwise empty.
+	PVCMode corev1.PersistentVolumeAccessMode `json:"pvcMode,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+
+// NnfContainerProfile is the Schema for the nnfcontainerprofiles API
+type NnfContainerProfile struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Data NnfContainerProfileData `json:"data"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+
+// NnfContainerProfileList contains a list of NnfContainerProfile
+type NnfContainerProfileList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NnfContainerProfile `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&NnfContainerProfile{}, &NnfContainerProfileList{})
+}
diff --git a/api/v1alpha1/nnfcontainerprofile_webhook.go b/api/v1alpha2/nnfcontainerprofile_webhook.go
similarity index 97%
rename from api/v1alpha1/nnfcontainerprofile_webhook.go
rename to api/v1alpha2/nnfcontainerprofile_webhook.go
index 31795fc56..aaa587ed3 100644
--- a/api/v1alpha1/nnfcontainerprofile_webhook.go
+++ b/api/v1alpha2/nnfcontainerprofile_webhook.go
@@ -17,7 +17,7 @@
  * limitations under the License.
  */
 
-package v1alpha1
+package v1alpha2
 
 import (
 	"fmt"
@@ -45,7 +45,7 @@ func (r *NnfContainerProfile) SetupWebhookWithManager(mgr ctrl.Manager) error {
 
 // NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
 // Modifying the path for an invalid path can cause API server errors; failing to locate the webhook.
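+// As a rough guide (this reflects the kubebuilder convention rather than
+// anything enforced in this file): the path is built from the group with dots
+// replaced by dashes, then the version, then the lowercased kind, which is
+// how the v1alpha2 path in the updated marker below is derived.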
-//+kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha1-nnfcontainerprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfcontainerprofiles,verbs=create;update,versions=v1alpha1,name=vnnfcontainerprofile.kb.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha2-nnfcontainerprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfcontainerprofiles,verbs=create;update,versions=v1alpha2,name=vnnfcontainerprofile.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &NnfContainerProfile{} diff --git a/api/v1alpha1/nnfcontainerprofile_webhook_test.go b/api/v1alpha2/nnfcontainerprofile_webhook_test.go similarity index 99% rename from api/v1alpha1/nnfcontainerprofile_webhook_test.go rename to api/v1alpha2/nnfcontainerprofile_webhook_test.go index bd66cc1ba..aaa6346a1 100644 --- a/api/v1alpha1/nnfcontainerprofile_webhook_test.go +++ b/api/v1alpha2/nnfcontainerprofile_webhook_test.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( "context" diff --git a/api/v1alpha2/nnfdatamovement_types.go b/api/v1alpha2/nnfdatamovement_types.go new file mode 100644 index 000000000..6ccf87b4e --- /dev/null +++ b/api/v1alpha2/nnfdatamovement_types.go @@ -0,0 +1,289 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // The required namespace for an NNF Data Movement operation. This is for system wide (lustre) + // data movement. Individual nodes may also perform data movement in which case they use the + // NNF Node Name as the namespace. + DataMovementNamespace = "nnf-dm-system" + + // The namespace for NnfDataMovementProfiles that are not pinned. 
+	DataMovementProfileNamespace = "nnf-system"
+)
+
+// NnfDataMovementSpec defines the desired state of NnfDataMovement
+type NnfDataMovementSpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	// Source describes the source of the data movement operation
+	Source *NnfDataMovementSpecSourceDestination `json:"source,omitempty"`
+
+	// Destination describes the destination of the data movement operation
+	Destination *NnfDataMovementSpecSourceDestination `json:"destination,omitempty"`
+
+	// User Id specifies the user ID for the data movement operation. This value is used
+	// in conjunction with the group ID to ensure the user has valid permissions to perform
+	// the data movement operation.
+	UserId uint32 `json:"userId,omitempty"`
+
+	// Group Id specifies the group ID for the data movement operation. This value is used
+	// in conjunction with the user ID to ensure the user has valid permissions to perform
+	// the data movement operation.
+	GroupId uint32 `json:"groupId,omitempty"`
+
+	// Set to true if the data movement operation should be canceled.
+	// +kubebuilder:default:=false
+	Cancel bool `json:"cancel,omitempty"`
+
+	// ProfileReference is an object reference to an NnfDataMovementProfile that is used to
+	// configure data movement. If empty, the default profile is used.
+	ProfileReference corev1.ObjectReference `json:"profileReference,omitempty"`
+
+	// User-defined configuration on how data movement should be performed. This overrides the
+	// configuration defined in the supplied ProfileReference/NnfDataMovementProfile. These values
+	// are typically set by the Copy Offload API.
+	UserConfig *NnfDataMovementConfig `json:"userConfig,omitempty"`
+}
+
+// NnfDataMovementSpecSourceDestination defines the desired source or destination of data movement
+type NnfDataMovementSpecSourceDestination struct {
+
+	// Path describes the location of the user data relative to the storage instance
+	Path string `json:"path,omitempty"`
+
+	// Storage describes the storage backing this data movement specification; Storage can reference
+	// either NNF storage or global Lustre storage depending on the object reference's Kind field.
+	StorageReference corev1.ObjectReference `json:"storageReference,omitempty"`
+}
+
+// NnfDataMovementConfig provides a way for a user to override the data movement behavior on a
+// per-DM basis.
+type NnfDataMovementConfig struct {
+
+	// Fake the Data Movement operation. The system "performs" Data Movement but the command to do so
+	// is trivial. This means a Data Movement request is still submitted but the IO is skipped.
+	// +kubebuilder:default:=false
+	Dryrun bool `json:"dryrun,omitempty"`
+
+	// Extra options to pass to the mpirun command (used to perform data movement).
+	MpirunOptions string `json:"mpirunOptions,omitempty"`
+
+	// Extra options to pass to the dcp command (used to perform data movement).
+	DcpOptions string `json:"dcpOptions,omitempty"`
+
+	// If true, enable the command's stdout to be saved in the log when the command completes
+	// successfully. On failure, the output is always logged.
+	// Note: Enabling this option may degrade performance.
+	// +kubebuilder:default:=false
+	LogStdout bool `json:"logStdout,omitempty"`
+
+	// Similar to LogStdout, store the command's stdout in Status.Message when the command completes
+	// successfully. On failure, the output is always stored.
+	// Note: Enabling this option may degrade performance.
+	// +kubebuilder:default:=false
+	StoreStdout bool `json:"storeStdout,omitempty"`
+
+	// The number of slots specified in the MPI hostfile. A value of 0 disables the use of slots in
+	// the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile.
+	Slots *int `json:"slots,omitempty"`
+
+	// The number of max_slots specified in the MPI hostfile. A value of 0 disables the use of slots
+	// in the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile.
+	MaxSlots *int `json:"maxSlots,omitempty"`
+}
+
+// NnfDataMovementCommandStatus defines the observed status of the underlying data movement
+// command (MPI File Utils' `dcp` command).
+type NnfDataMovementCommandStatus struct {
+	// The command that was executed during data movement.
+	Command string `json:"command,omitempty"`
+
+	// ElapsedTime reflects the elapsed time since the underlying data movement command started.
+	ElapsedTime metav1.Duration `json:"elapsedTime,omitempty"`
+
+	// ProgressPercentage reflects the progress of the underlying data movement command as captured from
+	// standard output. A best effort is made to parse the command output as a percentage. If no
+	// progress has yet been measured, then this field is omitted. If the latest command output does
+	// not contain a valid percentage, then the value is unchanged from the previously parsed value.
+	ProgressPercentage *int32 `json:"progress,omitempty"`
+
+	// LastMessage reflects the last message received over standard output or standard error as
+	// captured by the underlying data movement command.
+	LastMessage string `json:"lastMessage,omitempty"`
+
+	// LastMessageTime reflects the time at which the last message was received over standard output
+	// or standard error by the underlying data movement command.
+	LastMessageTime metav1.MicroTime `json:"lastMessageTime,omitempty"`
+
+	// Seconds is parsed from the dcp output when the command is finished.
+	Seconds string `json:"seconds,omitempty"`
+
+	// Items is parsed from the dcp output when the command is finished. This is a total of
+	// the number of directories, files, and links that dcp copied.
+	Items *int32 `json:"items,omitempty"`
+
+	// Directories is parsed from the dcp output when the command is finished. This is the number of
+	// directories that dcp copied. Note: This value may be inflated due to NNF index mount
+	// directories when copying from XFS or GFS2 filesystems.
+	Directories *int32 `json:"directories,omitempty"`
+
+	// Files is parsed from the dcp output when the command is finished. This is the number of files
+	// that dcp copied.
+	Files *int32 `json:"files,omitempty"`
+
+	// Links is parsed from the dcp output when the command is finished. This is the number of links
+	// that dcp copied.
+	Links *int32 `json:"links,omitempty"`
+
+	// Data is parsed from the dcp output when the command is finished. This is the total amount of
+	// data copied by dcp.
+	Data string `json:"data,omitempty"`
+
+	// Rate is parsed from the dcp output when the command is finished. This is the transfer rate of
+	// the data copied by dcp.
+	Rate string `json:"rate,omitempty"`
+}
+
+// NnfDataMovementStatus defines the observed state of NnfDataMovement
+type NnfDataMovementStatus struct {
+	// Current state of data movement.
+	// +kubebuilder:validation:Enum=Starting;Running;Finished
+	State string `json:"state,omitempty"`
+
+	// Status of the current state.
+	// +kubebuilder:validation:Enum=Success;Failed;Invalid;Cancelled
+	Status string `json:"status,omitempty"`
+
+	// Message contains any text that explains the Status. If Data Movement failed or storeStdout is
+	// enabled, this will contain the command's output.
+	Message string `json:"message,omitempty"`
+
+	// StartTime reflects the time at which the Data Movement operation started.
+	StartTime *metav1.MicroTime `json:"startTime,omitempty"`
+
+	// EndTime reflects the time at which the Data Movement operation ended.
+	EndTime *metav1.MicroTime `json:"endTime,omitempty"`
+
+	// Restarts contains the number of restarts of the Data Movement operation.
+	Restarts int `json:"restarts,omitempty"`
+
+	// CommandStatus reflects the current status of the underlying Data Movement command
+	// as it executes. The command status is polled at a certain frequency to avoid excessive
+	// updates to the Data Movement resource.
+	CommandStatus *NnfDataMovementCommandStatus `json:"commandStatus,omitempty"`
+
+	dwsv1alpha2.ResourceError `json:",inline"`
+}
+
+// Types describing the various data movement status conditions.
+const (
+	DataMovementConditionTypeStarting = "Starting"
+	DataMovementConditionTypeRunning  = "Running"
+	DataMovementConditionTypeFinished = "Finished"
+)
+
+// Reasons describing the various data movement status conditions. Must be
+// in CamelCase format (see metav1.Condition)
+const (
+	DataMovementConditionReasonSuccess   = "Success"
+	DataMovementConditionReasonFailed    = "Failed"
+	DataMovementConditionReasonInvalid   = "Invalid"
+	DataMovementConditionReasonCancelled = "Cancelled"
+)
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+// +kubebuilder:storageversion
+//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="Current state"
+//+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Status of current state"
+//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity"
+//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+
+// NnfDataMovement is the Schema for the nnfdatamovements API
+type NnfDataMovement struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   NnfDataMovementSpec   `json:"spec,omitempty"`
+	Status NnfDataMovementStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// NnfDataMovementList contains a list of NnfDataMovement
+type NnfDataMovementList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NnfDataMovement `json:"items"`
+}
+
+func (n *NnfDataMovementList) GetObjectList() []client.Object {
+	objectList := []client.Object{}
+
+	for i := range n.Items {
+		objectList = append(objectList, &n.Items[i])
+	}
+
+	return objectList
+}
+
+const (
+	// DataMovementTeardownStateLabel is the label applied to Data Movement and related resources that describes
+	// the workflow state when the resource is no longer needed and can be safely deleted.
+	DataMovementTeardownStateLabel = "nnf.cray.hpe.com/teardown_state"
+
+	// DataMovementInitiatorLabel is the label applied to Data Movement resources that describes the origin of
+	// the data movement request. This would be from a copy_in/copy_out directive or from a compute node via the
+	// Copy Offload API (i.e. nnf-dm daemon).
+ DataMovementInitiatorLabel = "dm.cray.hpe.com/initiator" +) + +func AddDataMovementTeardownStateLabel(object metav1.Object, state dwsv1alpha2.WorkflowState) { + labels := object.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[DataMovementTeardownStateLabel] = string(state) + object.SetLabels(labels) +} + +func AddDataMovementInitiatorLabel(object metav1.Object, initiator string) { + labels := object.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[DataMovementInitiatorLabel] = initiator + object.SetLabels(labels) +} + +func init() { + SchemeBuilder.Register(&NnfDataMovement{}, &NnfDataMovementList{}) +} diff --git a/api/v1alpha2/nnfdatamovement_webhook.go b/api/v1alpha2/nnfdatamovement_webhook.go new file mode 100644 index 000000000..5d63c61a3 --- /dev/null +++ b/api/v1alpha2/nnfdatamovement_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnfdatamovementlog = logf.Log.WithName("nnfdatamovement-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfDataMovement) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfdatamovement_webhook_test.go b/api/v1alpha2/nnfdatamovement_webhook_test.go new file mode 100644 index 000000000..9a72a6278 --- /dev/null +++ b/api/v1alpha2/nnfdatamovement_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfDataMovement Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. 
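+	//
+	// A minimal sketch of what such a round-trip check could look like,
+	// assuming a hypothetical v1alpha1 spoke object `dm` and Gomega's
+	// Expect/Succeed (the names in this sketch are illustrative only, not
+	// part of this change):
+	//
+	//	hub := &NnfDataMovement{}
+	//	Expect(dm.ConvertTo(hub)).To(Succeed())
+	//	Expect(dm.ConvertFrom(hub)).To(Succeed())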
+ + Context("When creating NnfDataMovement under Conversion Webhook", func() { + It("Should get the converted version of NnfDataMovement", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfdatamovementmanager_types.go b/api/v1alpha2/nnfdatamovementmanager_types.go new file mode 100644 index 000000000..6867caa1e --- /dev/null +++ b/api/v1alpha2/nnfdatamovementmanager_types.go @@ -0,0 +1,106 @@ +/* + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/DataWorkflowServices/dws/utils/updater" +) + +const ( + DataMovementWorkerLabel = "dm.cray.hpe.com/worker" + + // The name of the expected Data Movement manager. This is to ensure Data Movement is ready in + // the DataIn/DataOut stages before attempting data movement operations. + DataMovementManagerName = "nnf-dm-manager-controller-manager" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfDataMovementManagerSpec defines the desired state of NnfDataMovementManager +type NnfDataMovementManagerSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Selector defines the pod selector used in scheduling the worker nodes. This value is duplicated + // to the template.spec.metadata.labels to satisfy the requirements of the worker's Daemon Set. + Selector metav1.LabelSelector `json:"selector"` + + // Template defines the pod template that is used for the basis of the worker Daemon Set that + // manages the per node data movement operations. + Template corev1.PodTemplateSpec `json:"template"` + + // UpdateStrategy defines the UpdateStrategy that is used for the basis of the worker Daemon Set + // that manages the per node data movement operations. + UpdateStrategy appsv1.DaemonSetUpdateStrategy `json:"updateStrategy"` + + // Host Path defines the directory location of shared mounts on an individual worker node. + HostPath string `json:"hostPath"` + + // Mount Path defines the location within the container at which the Host Path volume should be mounted. + MountPath string `json:"mountPath"` +} + +// NnfDataMovementManagerStatus defines the observed state of NnfDataMovementManager +type NnfDataMovementManagerStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Ready indicates that the Data Movement Manager has achieved the desired readiness state + // and all managed resources are initialized. 
+ // +kubebuilder:default:=false + Ready bool `json:"ready"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if manager readied all resources" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// NnfDataMovementManager is the Schema for the nnfdatamovementmanagers API +type NnfDataMovementManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfDataMovementManagerSpec `json:"spec,omitempty"` + Status NnfDataMovementManagerStatus `json:"status,omitempty"` +} + +func (m *NnfDataMovementManager) GetStatus() updater.Status[*NnfDataMovementManagerStatus] { + return &m.Status +} + +//+kubebuilder:object:root=true + +// NnfDataMovementManagerList contains a list of NnfDataMovementManager +type NnfDataMovementManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfDataMovementManager `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfDataMovementManager{}, &NnfDataMovementManagerList{}) +} diff --git a/api/v1alpha2/nnfdatamovementmanager_webhook.go b/api/v1alpha2/nnfdatamovementmanager_webhook.go new file mode 100644 index 000000000..8174dfd09 --- /dev/null +++ b/api/v1alpha2/nnfdatamovementmanager_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnfdatamovementmanagerlog = logf.Log.WithName("nnfdatamovementmanager-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfDataMovementManager) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfdatamovementmanager_webhook_test.go b/api/v1alpha2/nnfdatamovementmanager_webhook_test.go new file mode 100644 index 000000000..79fedf021 --- /dev/null +++ b/api/v1alpha2/nnfdatamovementmanager_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License.
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfDataMovementManager Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfDataMovementManager under Conversion Webhook", func() { + It("Should get the converted version of NnfDataMovementManager", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfdatamovementprofile_types.go b/api/v1alpha2/nnfdatamovementprofile_types.go new file mode 100644 index 000000000..cb2b23011 --- /dev/null +++ b/api/v1alpha2/nnfdatamovementprofile_types.go @@ -0,0 +1,124 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NnfDataMovementProfileData defines the desired state of NnfDataMovementProfile +type NnfDataMovementProfileData struct { + + // Default is true if this instance is the default resource to use + // +kubebuilder:default:=false + Default bool `json:"default,omitempty"` + + // Pinned is true if this instance is an immutable copy + // +kubebuilder:default:=false + Pinned bool `json:"pinned,omitempty"` + + // Slots is the number of slots specified in the MPI hostfile. A value of 0 disables the use of + // slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. + // +kubebuilder:default:=8 + // +kubebuilder:validation:Minimum:=0 + Slots int `json:"slots"` + + // MaxSlots is the number of max_slots specified in the MPI hostfile. A value of 0 disables the + // use of max_slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. + // +kubebuilder:default:=0 + // +kubebuilder:validation:Minimum:=0 + MaxSlots int `json:"maxSlots"` + + // Command to execute to perform data movement. $VARS are replaced by the nnf software and must + // be present in the command. + // Available $VARS: + // HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + // slots/max_slots for each host. 
This hostfile is created at `/tmp//hostfile` + // UID: User ID that is inherited from the Workflow + // GID: Group ID that is inherited from the Workflow + // SRC: source for the data movement + // DEST: destination for the data movement + // +kubebuilder:default:="ulimit -n 2048 && mpirun --allow-run-as-root --hostfile $HOSTFILE dcp --progress 1 --uid $UID --gid $GID $SRC $DEST" + Command string `json:"command"` + + // If true, enable the command's stdout to be saved in the log when the command completes + // successfully. On failure, the output is always logged. + // +kubebuilder:default:=false + LogStdout bool `json:"logStdout,omitempty"` + + // Similar to logStdout, store the command's stdout in Status.Message when the command completes + // successfully. On failure, the output is always stored. + // +kubebuilder:default:=false + StoreStdout bool `json:"storeStdout,omitempty"` + + // NnfDataMovement resources have the ability to collect and store the progress percentage and the + // last few lines of output in the CommandStatus field. This number is used for the interval to collect + // the progress data. `dcp --progress N` must be included in the data movement command in order for + // progress to be collected. A value of 0 disables this functionality. + // +kubebuilder:default:=5 + // +kubebuilder:validation:Minimum:=0 + ProgressIntervalSeconds int `json:"progressIntervalSeconds,omitempty"` + + // CreateDestDir will ensure that the destination directory exists before performing data + // movement. This will cause a number of stat commands to determine the source and destination + // file types, so that the correct pathing for the destination can be determined. Then, a mkdir + // is issued. + // +kubebuilder:default:=true + CreateDestDir bool `json:"createDestDir"` + + // If CreateDestDir is true, then use StatCommand to perform the stat commands. + // Use setpriv to stat the path with the specified UID/GID. + // Available $VARS: + // HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + // slots/max_slots for each host. This hostfile is created at + // `/tmp//hostfile`. This is the same hostfile used as the one for Command.
+ // UID: User ID that is inherited from the Workflow + // GID: Group ID that is inherited from the Workflow + // PATH: Path to stat + // +kubebuilder:default:="mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- setpriv --euid $UID --egid $GID --clear-groups stat --cached never -c '%F' $PATH" + StatCommand string `json:"statCommand"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="DEFAULT",type="boolean",JSONPath=".data.default",description="True if this is the default instance" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// NnfDataMovementProfile is the Schema for the nnfdatamovementprofiles API +type NnfDataMovementProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Data NnfDataMovementProfileData `json:"data,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:storageversion + +// NnfDataMovementProfileList contains a list of NnfDataMovementProfile +type NnfDataMovementProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfDataMovementProfile `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfDataMovementProfile{}, &NnfDataMovementProfileList{}) +} diff --git a/api/v1alpha1/nnfdatamovementprofile_webhook.go b/api/v1alpha2/nnfdatamovementprofile_webhook.go similarity index 96% rename from api/v1alpha1/nnfdatamovementprofile_webhook.go rename to api/v1alpha2/nnfdatamovementprofile_webhook.go index 7f39d845a..f0d494b5a 100644 --- a/api/v1alpha1/nnfdatamovementprofile_webhook.go +++ b/api/v1alpha2/nnfdatamovementprofile_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( "fmt" @@ -43,7 +43,7 @@ func (r *NnfDataMovementProfile) SetupWebhookWithManager(mgr ctrl.Manager) error // NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. // Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. -// +kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha1-nnfdatamovementprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfdatamovementprofiles,verbs=create;update,versions=v1alpha1,name=vnnfdatamovementprofile.kb.io,admissionReviewVersions=v1 +// +kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha2-nnfdatamovementprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfdatamovementprofiles,verbs=create;update,versions=v1alpha2,name=vnnfdatamovementprofile.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &NnfDataMovementProfile{} diff --git a/api/v1alpha1/nnfdatamovementprofile_webhook_test.go b/api/v1alpha2/nnfdatamovementprofile_webhook_test.go similarity index 99% rename from api/v1alpha1/nnfdatamovementprofile_webhook_test.go rename to api/v1alpha2/nnfdatamovementprofile_webhook_test.go index cc7971cf5..e486b8eda 100644 --- a/api/v1alpha1/nnfdatamovementprofile_webhook_test.go +++ b/api/v1alpha2/nnfdatamovementprofile_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha1 +package v1alpha2 import ( "context" diff --git a/api/v1alpha2/nnflustremgt_types.go b/api/v1alpha2/nnflustremgt_types.go new file mode 100644 index 000000000..1fa720246 --- /dev/null +++ b/api/v1alpha2/nnflustremgt_types.go @@ -0,0 +1,108 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NnfLustreMGTSpec defines the desired state of NnfLustreMGT +type NnfLustreMGTSpec struct { + // Addresses is the list of LNet addresses for the MGT + Addresses []string `json:"addresses"` + + // FsNameBlackList is a list of fsnames that can't be used. This may be + // necessary if the MGT hosts file systems external to Rabbit + FsNameBlackList []string `json:"fsNameBlackList,omitempty"` + + // FsNameStart is the starting fsname to be used + // +kubebuilder:validation:MaxLength:=8 + // +kubebuilder:validation:MinLength:=8 + FsNameStart string `json:"fsNameStart,omitempty"` + + // FsNameStartReference can be used to add a configmap where the starting fsname is + // stored. If this reference is set, it takes precedence over FsNameStart. The configmap + // will be updated with the next available fsname anytime an fsname is used.
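+ // For example (an editor's illustration, not part of this API's contract): + // pointing this at a ConfigMap whose stored value begins at "aaaaaaaa" seeds + // fsname generation, and the stored value then advances as fsnames are consumed.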
+ FsNameStartReference corev1.ObjectReference `json:"fsNameStartReference,omitempty"` + + // ClaimList is the list of currently in use fsnames + ClaimList []corev1.ObjectReference `json:"claimList,omitempty"` +} + +// NnfLustreMGTStatus defines the current state of NnfLustreMGT +type NnfLustreMGTStatus struct { + // FsNameNext is the next available fsname that hasn't been used + // +kubebuilder:validation:MaxLength:=8 + // +kubebuilder:validation:MinLength:=8 + FsNameNext string `json:"fsNameNext,omitempty"` + + // ClaimList is the list of currently in use fsnames + ClaimList []NnfLustreMGTStatusClaim `json:"claimList,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` +} + +type NnfLustreMGTStatusClaim struct { + Reference corev1.ObjectReference `json:"reference,omitempty"` + FsName string `json:"fsname,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// NnfLustreMGT is the Schema for the nnflustremgts API +type NnfLustreMGT struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfLustreMGTSpec `json:"spec,omitempty"` + Status NnfLustreMGTStatus `json:"status,omitempty"` +} + +func (a *NnfLustreMGT) GetStatus() updater.Status[*NnfLustreMGTStatus] { + return &a.Status +} + +//+kubebuilder:object:root=true + +// NnfLustreMGTList contains a list of NnfLustreMGT +type NnfLustreMGTList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfLustreMGT `json:"items"` +} + +func (n *NnfLustreMGTList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&NnfLustreMGT{}, &NnfLustreMGTList{}) +} diff --git a/api/v1alpha2/nnflustremgt_webhook.go b/api/v1alpha2/nnflustremgt_webhook.go new file mode 100644 index 000000000..3d6b1732c --- /dev/null +++ b/api/v1alpha2/nnflustremgt_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnflustremgtlog = logf.Log.WithName("nnflustremgt-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfLustreMGT) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
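Taken together, the spec and status above imply a small claim protocol: a consumer adds its own object reference to spec.claimList, and the reconciler answers with an entry in status.claimList carrying the assigned fsname. The sketch below illustrates that contract under stated assumptions: the helper name claimFsName is hypothetical, the polling loop stands in for the watch-and-requeue a real controller would use, and releasing an fsname is presumed to be the mirror image (remove the reference from spec.claimList).

package v1alpha2

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// claimFsName registers claimer on the NnfLustreMGT named by mgt, then waits
// for the reconciler to publish the assigned fsname in status.claimList.
func claimFsName(ctx context.Context, c client.Client, mgt types.NamespacedName, claimer corev1.ObjectReference) (string, error) {
	nnfLustreMgt := &NnfLustreMGT{}
	if err := c.Get(ctx, mgt, nnfLustreMgt); err != nil {
		return "", err
	}

	// Add the claim reference to the spec if it is not already present.
	found := false
	for _, ref := range nnfLustreMgt.Spec.ClaimList {
		if ref == claimer {
			found = true
			break
		}
	}
	if !found {
		nnfLustreMgt.Spec.ClaimList = append(nnfLustreMgt.Spec.ClaimList, claimer)
		if err := c.Update(ctx, nnfLustreMgt); err != nil {
			return "", err
		}
	}

	// Wait for status.claimList to echo the claim with its assigned fsname.
	for {
		for _, claim := range nnfLustreMgt.Status.ClaimList {
			if claim.Reference == claimer {
				return claim.FsName, nil
			}
		}
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(time.Second): // a real controller would requeue instead
		}
		if err := c.Get(ctx, mgt, nnfLustreMgt); err != nil {
			return "", err
		}
	}
}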
diff --git a/api/v1alpha2/nnflustremgt_webhook_test.go b/api/v1alpha2/nnflustremgt_webhook_test.go new file mode 100644 index 000000000..b97399ba8 --- /dev/null +++ b/api/v1alpha2/nnflustremgt_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfLustreMGT Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfLustreMGT under Conversion Webhook", func() { + It("Should get the converted version of NnfLustreMGT", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfnode_types.go b/api/v1alpha2/nnfnode_types.go new file mode 100644 index 000000000..ec0b67a08 --- /dev/null +++ b/api/v1alpha2/nnfnode_types.go @@ -0,0 +1,132 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + "github.com/DataWorkflowServices/dws/utils/updater" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// NnfNodeSpec defines the desired state of NNF Node +type NnfNodeSpec struct { + // Important: Run "make" to regenerate code after modifying this file + + // The unique name for this NNF Node + Name string `json:"name,omitempty"` + + // Pod name for this NNF Node + Pod string `json:"pod,omitempty"` + + // State reflects the desired state of this NNF Node resource + // +kubebuilder:validation:Enum=Enable;Disable + State NnfResourceStateType `json:"state"` +} + +// NnfNodeStatus defines the observed status of NNF Node +type NnfNodeStatus struct { + // Important: Run "make" to regenerate code after modifying this file + + // Status reflects the current status of the NNF Node + Status NnfResourceStatusType `json:"status,omitempty"` + + Health NnfResourceHealthType `json:"health,omitempty"` + + // Fenced is true when the NNF Node is fenced by the STONITH agent, and false otherwise. + Fenced bool `json:"fenced,omitempty"` + + // LNetNid is the LNet address for the NNF node + LNetNid string `json:"lnetNid,omitempty"` + + Capacity int64 `json:"capacity,omitempty"` + CapacityAllocated int64 `json:"capacityAllocated,omitempty"` + + Servers []NnfServerStatus `json:"servers,omitempty"` + + Drives []NnfDriveStatus `json:"drives,omitempty"` +} + +// NnfServerStatus defines the observed status of servers connected to this NNF Node +type NnfServerStatus struct { + Hostname string `json:"hostname,omitempty"` + + NnfResourceStatus `json:",inline"` +} + +// NnfDriveStatus defines the observed status of drives connected to this NNF Node +type NnfDriveStatus struct { + // Model is the manufacturer information about the device + Model string `json:"model,omitempty"` + + // The serial number for this storage controller. + SerialNumber string `json:"serialNumber,omitempty"` + + // The firmware version of this storage controller. + FirmwareVersion string `json:"firmwareVersion,omitempty"` + + // Physical slot location of the storage controller. + Slot string `json:"slot,omitempty"` + + // Capacity in bytes of the device. The full capacity may not + // be usable depending on what the storage driver can provide.
+ Capacity int64 `json:"capacity,omitempty"` + + // WearLevel in percent for SSDs + WearLevel int64 `json:"wearLevel,omitempty"` + + NnfResourceStatus `json:",inline"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".spec.state",description="Current desired state" +//+kubebuilder:printcolumn:name="HEALTH",type="string",JSONPath=".status.health",description="Health of node" +//+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Current status of node" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +//+kubebuilder:printcolumn:name="POD",type="string",JSONPath=".spec.pod",description="Parent pod name",priority=1 + +// NnfNode is the Schema for the NnfNode API +type NnfNode struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeSpec `json:"spec,omitempty"` + Status NnfNodeStatus `json:"status,omitempty"` +} + +func (n *NnfNode) GetStatus() updater.Status[*NnfNodeStatus] { + return &n.Status +} + +//+kubebuilder:object:root=true + +// NnfNodeList contains a list of NNF Nodes +type NnfNodeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNode `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfNode{}, &NnfNodeList{}) +} diff --git a/api/v1alpha2/nnfnode_webhook.go b/api/v1alpha2/nnfnode_webhook.go new file mode 100644 index 000000000..a2d61a3aa --- /dev/null +++ b/api/v1alpha2/nnfnode_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnfnodelog = logf.Log.WithName("nnfnode-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfNode) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfnode_webhook_test.go b/api/v1alpha2/nnfnode_webhook_test.go new file mode 100644 index 000000000..2ad7353d2 --- /dev/null +++ b/api/v1alpha2/nnfnode_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfNode Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfNode under Conversion Webhook", func() { + It("Should get the converted version of NnfNode", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfnodeblockstorage_types.go b/api/v1alpha2/nnfnodeblockstorage_types.go new file mode 100644 index 000000000..8fa02ca4b --- /dev/null +++ b/api/v1alpha2/nnfnodeblockstorage_types.go @@ -0,0 +1,137 @@ +/* + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type NnfNodeBlockStorageAllocationSpec struct { + // Aggregate capacity of the block devices for each allocation + Capacity int64 `json:"capacity,omitempty"` + + // List of nodes where /dev devices should be created + Access []string `json:"access,omitempty"` +} + +// NnfNodeBlockStorageSpec defines the desired storage attributes on an NNF Node. +// Storage specs are created at the request of the user and fulfilled by the NNF Node Controller. +type NnfNodeBlockStorageSpec struct { + // SharedAllocation is used when a single NnfNodeBlockStorage allocation is used by multiple NnfNodeStorage allocations + SharedAllocation bool `json:"sharedAllocation"` + + // Allocations is the list of storage allocations to make + Allocations []NnfNodeBlockStorageAllocationSpec `json:"allocations,omitempty"` +} + +type NnfNodeBlockStorageStatus struct { + // Allocations is the list of storage allocations that were made + Allocations []NnfNodeBlockStorageAllocationStatus `json:"allocations,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` + + // PodStartTime is the value of pod.status.containerStatuses[].state.running.startedAt from the pod that did + // the last successful full reconcile of the NnfNodeBlockStorage. This is used to tell whether the /dev paths + // listed in the status section are from the current boot of the node.
+ PodStartTime metav1.Time `json:"podStartTime,omitempty"` + + Ready bool `json:"ready"` +} + +type NnfNodeBlockStorageDeviceStatus struct { + // NQN of the base NVMe device + NQN string `json:"NQN"` + + // Id of the Namespace on the NVMe device (e.g., "2") + NamespaceId string `json:"namespaceId"` + + // Total capacity allocated for the storage. This may differ from the requested storage + // capacity as the system may round up the requested capacity to satisfy underlying + // storage requirements (i.e. block size / stripe size). + CapacityAllocated int64 `json:"capacityAllocated,omitempty"` +} + +type NnfNodeBlockStorageAccessStatus struct { + // /dev paths for each of the block devices + DevicePaths []string `json:"devicePaths,omitempty"` + + // Redfish ID for the storage group + StorageGroupId string `json:"storageGroupId,omitempty"` +} + +type NnfNodeBlockStorageAllocationStatus struct { + // Accesses is a map of node name to the access status + Accesses map[string]NnfNodeBlockStorageAccessStatus `json:"accesses,omitempty"` + + // List of NVMe namespaces used by this allocation + Devices []NnfNodeBlockStorageDeviceStatus `json:"devices,omitempty"` + + // Total capacity allocated for the storage. This may differ from the requested storage + // capacity as the system may round up the requested capacity to satisfy underlying + // storage requirements (i.e. block size / stripe size). + CapacityAllocated int64 `json:"capacityAllocated,omitempty"` + + // Redfish ID for the storage pool + StoragePoolId string `json:"storagePoolId,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" +// +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +type NnfNodeBlockStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeBlockStorageSpec `json:"spec,omitempty"` + Status NnfNodeBlockStorageStatus `json:"status,omitempty"` +} + +func (ns *NnfNodeBlockStorage) GetStatus() updater.Status[*NnfNodeBlockStorageStatus] { + return &ns.Status +} + +// +kubebuilder:object:root=true + +// NnfNodeBlockStorageList contains a list of NnfNodeBlockStorage +type NnfNodeBlockStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNodeBlockStorage `json:"items"` +} + +func (n *NnfNodeBlockStorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&NnfNodeBlockStorage{}, &NnfNodeBlockStorageList{}) +} diff --git a/api/v1alpha2/nnfnodeblockstorage_webhook.go b/api/v1alpha2/nnfnodeblockstorage_webhook.go new file mode 100644 index 000000000..e5fc30adb --- /dev/null +++ b/api/v1alpha2/nnfnodeblockstorage_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License.
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnfnodeblockstoragelog = logf.Log.WithName("nnfnodeblockstorage-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfNodeBlockStorage) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfnodeblockstorage_webhook_test.go b/api/v1alpha2/nnfnodeblockstorage_webhook_test.go new file mode 100644 index 000000000..f3de6a692 --- /dev/null +++ b/api/v1alpha2/nnfnodeblockstorage_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfNodeBlockStorage Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfNodeBlockStorage under Conversion Webhook", func() { + It("Should get the converted version of NnfNodeBlockStorage", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfnodeecdata_types.go b/api/v1alpha2/nnfnodeecdata_types.go new file mode 100644 index 000000000..0460420a1 --- /dev/null +++ b/api/v1alpha2/nnfnodeecdata_types.go @@ -0,0 +1,69 @@ +/* + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfNodeECDataSpec defines the desired state of NnfNodeECData +type NnfNodeECDataSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// NnfNodeECDataStatus defines the observed state of NnfNodeECData +type NnfNodeECDataStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + Data map[string]NnfNodeECPrivateData `json:"data,omitempty"` +} + +type NnfNodeECPrivateData map[string]string + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion + +// NnfNodeECData is the Schema for the nnfnodeecdata API +type NnfNodeECData struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeECDataSpec `json:"spec,omitempty"` + Status NnfNodeECDataStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// NnfNodeECDataList contains a list of NnfNodeECData +type NnfNodeECDataList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNodeECData `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfNodeECData{}, &NnfNodeECDataList{}) +} diff --git a/api/v1alpha2/nnfnodeecdata_webhook.go b/api/v1alpha2/nnfnodeecdata_webhook.go new file mode 100644 index 000000000..61246a1ef --- /dev/null +++ b/api/v1alpha2/nnfnodeecdata_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnfnodeecdatalog = logf.Log.WithName("nnfnodeecdata-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfNodeECData) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfnodeecdata_webhook_test.go b/api/v1alpha2/nnfnodeecdata_webhook_test.go new file mode 100644 index 000000000..a858d6926 --- /dev/null +++ b/api/v1alpha2/nnfnodeecdata_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfNodeECData Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfNodeECData under Conversion Webhook", func() { + It("Should get the converted version of NnfNodeECData", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfnodestorage_types.go b/api/v1alpha2/nnfnodestorage_types.go new file mode 100644 index 000000000..213194990 --- /dev/null +++ b/api/v1alpha2/nnfnodestorage_types.go @@ -0,0 +1,154 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// IMPORTANT: Run "make" to regenerate code after modifying this file +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfNodeStorageSpec defines the desired storage attributes on an NNF Node. +// Storage specs are created at the behest of the user and fulfilled by the NNF Node Controller. +type NnfNodeStorageSpec struct { + // Count is the number of allocations to make on this node. All of the allocations will + // be created with the same parameters + // +kubebuilder:validation:Minimum:=0 + Count int `json:"count"` + + // SharedAllocation is used when a single NnfNodeBlockStorage allocation is used by multiple NnfNodeStorage allocations + SharedAllocation bool `json:"sharedAllocation"` + + // Capacity of an individual allocation + Capacity int64 `json:"capacity,omitempty"` + + // User ID for file system + UserID uint32 `json:"userID"` + + // Group ID for file system + GroupID uint32 `json:"groupID"` + + // FileSystemType defines the type of the desired filesystem, or raw + // block device.
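+ // For example (illustrative): "lustre" engages the LustreStorage fields + // below, while "raw" presents the underlying block device as-is.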
+ // +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre + // +kubebuilder:default:=raw + FileSystemType string `json:"fileSystemType,omitempty"` + + // LustreStorageSpec describes the Lustre target created here, if + // FileSystemType specifies a Lustre target. + LustreStorage LustreStorageSpec `json:"lustreStorage,omitempty"` + + // BlockReference is an object reference to an NnfNodeBlockStorage + BlockReference corev1.ObjectReference `json:"blockReference,omitempty"` +} + +// LustreStorageSpec describes the Lustre target to be created here. +type LustreStorageSpec struct { + // FileSystemName is the fsname parameter for the Lustre filesystem. + // +kubebuilder:validation:MaxLength:=8 + FileSystemName string `json:"fileSystemName,omitempty"` + + // TargetType is the type of Lustre target to be created. + // +kubebuilder:validation:Enum=mgt;mdt;mgtmdt;ost + TargetType string `json:"targetType,omitempty"` + + // StartIndex is used to order a series of MDTs or OSTs. This is used only + // when creating MDT and OST targets. If count in the NnfNodeStorageSpec is more + // than 1, then StartIndex is the index of the first allocation, and the indexes + // increment from there. + // +kubebuilder:validation:Minimum:=0 + StartIndex int `json:"startIndex,omitempty"` + + // MgsAddress is the NID of the MGS to use. This is used only when + // creating MDT and OST targets. + MgsAddress string `json:"mgsAddress,omitempty"` + + // BackFs is the type of backing filesystem to use. + // +kubebuilder:validation:Enum=ldiskfs;zfs + BackFs string `json:"backFs,omitempty"` +} + +// NnfNodeStorageStatus defines the status for NnfNodeStorage +type NnfNodeStorageStatus struct { + // Allocations is the list of storage allocations that were made + Allocations []NnfNodeStorageAllocationStatus `json:"allocations,omitempty"` + + Ready bool `json:"ready,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` +} + +// NnfNodeStorageAllocationStatus defines the allocation status for each allocation in the NnfNodeStorage +type NnfNodeStorageAllocationStatus struct { + // Name of the LVM VG + VolumeGroup string `json:"volumeGroup,omitempty"` + + // Name of the LVM LV + LogicalVolume string `json:"logicalVolume,omitempty"` + + Ready bool `json:"ready,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" +// +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// NnfNodeStorage is the Schema for the NnfNodeStorage API +type NnfNodeStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeStorageSpec `json:"spec,omitempty"` + Status NnfNodeStorageStatus `json:"status,omitempty"` +} + +func (ns *NnfNodeStorage) GetStatus() updater.Status[*NnfNodeStorageStatus] { + return &ns.Status +} + +//+kubebuilder:object:root=true + +// NnfNodeStorageList contains a list of NnfNodeStorage +type NnfNodeStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNodeStorage `json:"items"` +} + +func (n *NnfNodeStorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&NnfNodeStorage{},
&NnfNodeStorageList{}) +} diff --git a/api/v1alpha2/nnfnodestorage_webhook.go b/api/v1alpha2/nnfnodestorage_webhook.go new file mode 100644 index 000000000..261c49672 --- /dev/null +++ b/api/v1alpha2/nnfnodestorage_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnfnodestoragelog = logf.Log.WithName("nnfnodestorage-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfNodeStorage) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfnodestorage_webhook_test.go b/api/v1alpha2/nnfnodestorage_webhook_test.go new file mode 100644 index 000000000..eaf09ab93 --- /dev/null +++ b/api/v1alpha2/nnfnodestorage_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfNodeStorage Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfNodeStorage under Conversion Webhook", func() { + It("Should get the converted version of NnfNodeStorage", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfportmanager_types.go b/api/v1alpha2/nnfportmanager_types.go new file mode 100644 index 000000000..6f39fbdc1 --- /dev/null +++ b/api/v1alpha2/nnfportmanager_types.go @@ -0,0 +1,142 @@ +/* + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfPortManagerAllocationSpec defines the desired state for a single port allocation +type NnfPortManagerAllocationSpec struct { + // Requester is an object reference to the requester of ports. + Requester corev1.ObjectReference `json:"requester"` + + // Count is the number of desired ports the requester needs. The port manager + // will attempt to allocate this many ports. + // +kubebuilder:default:=1 + Count int `json:"count"` +} + +// NnfPortManagerSpec defines the desired state of NnfPortManager +type NnfPortManagerSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // SystemConfiguration is an object reference to the system configuration. The + // Port Manager will use the available ports defined in the system configuration. + SystemConfiguration corev1.ObjectReference `json:"systemConfiguration"` + + // Allocations is a list of allocation requests that the Port Manager will attempt + // to satisfy. To request port resources from the port manager, clients should add + // an entry to the allocations. Entries must be unique. The port manager controller + // will attempt to allocate port resources for each allocation specification in the + // list. To remove an allocation and free up port resources, remove the allocation + // from the list. + Allocations []NnfPortManagerAllocationSpec `json:"allocations"` +} + +// AllocationStatus is the current status of a port requester. A port that is in use by the respective owner +// will have a status of "InUse". A port that is freed by the owner but not yet reclaimed by the port manager +// will have a status of "Free". Any other status value indicates a failure of the port allocation. +// +kubebuilder:validation:Enum:=InUse;Free;Cooldown;InvalidConfiguration;InsufficientResources +type NnfPortManagerAllocationStatusStatus string + +const ( + NnfPortManagerAllocationStatusInUse NnfPortManagerAllocationStatusStatus = "InUse" + NnfPortManagerAllocationStatusFree NnfPortManagerAllocationStatusStatus = "Free" + NnfPortManagerAllocationStatusCooldown NnfPortManagerAllocationStatusStatus = "Cooldown" + NnfPortManagerAllocationStatusInvalidConfiguration NnfPortManagerAllocationStatusStatus = "InvalidConfiguration" + NnfPortManagerAllocationStatusInsufficientResources NnfPortManagerAllocationStatusStatus = "InsufficientResources" + // NOTE: You must ensure any new value is added to the above kubebuilder validation enum +) + +// NnfPortManagerAllocationStatus defines the allocation status of a port for a given requester.
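+// One plausible lifecycle, inferred from the comments above rather than promised by the API: a client appends an +// NnfPortManagerAllocationSpec to spec.allocations, waits for an entry here whose Requester matches and whose +// Status is "InUse", uses the listed Ports, and removes its spec entry when finished; TimeUnallocated then +// holds the allocation in "Cooldown" before it is reclaimed.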
+type NnfPortManagerAllocationStatus struct { + // Requester is an object reference to the requester of the port resource, if one exists, or + // empty otherwise. + Requester *corev1.ObjectReference `json:"requester,omitempty"` + + // Ports is a list of ports allocated to the owning resource. + Ports []uint16 `json:"ports,omitempty"` + + // Status is the ownership status of the port. + Status NnfPortManagerAllocationStatusStatus `json:"status"` + + // TimeUnallocated is when the port was unallocated. This is to ensure the proper cooldown + // duration. + TimeUnallocated *metav1.Time `json:"timeUnallocated,omitempty"` +} + +// PortManagerStatus is the current status of the port manager. +// +kubebuilder:validation:Enum:=Ready;SystemConfigurationNotFound +type NnfPortManagerStatusStatus string + +const ( + NnfPortManagerStatusReady NnfPortManagerStatusStatus = "Ready" + NnfPortManagerStatusSystemConfigurationNotFound NnfPortManagerStatusStatus = "SystemConfigurationNotFound" + // NOTE: You must ensure any new value is added to the above kubebuilder validation enum +) + +// NnfPortManagerStatus defines the observed state of NnfPortManager +type NnfPortManagerStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Allocations is a list of port allocation statuses. + Allocations []NnfPortManagerAllocationStatus `json:"allocations,omitempty"` + + // Status is the current status of the port manager. + Status NnfPortManagerStatusStatus `json:"status"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion + +// NnfPortManager is the Schema for the nnfportmanagers API +type NnfPortManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfPortManagerSpec `json:"spec,omitempty"` + Status NnfPortManagerStatus `json:"status,omitempty"` +} + +func (mgr *NnfPortManager) GetStatus() updater.Status[*NnfPortManagerStatus] { + return &mgr.Status +} + +//+kubebuilder:object:root=true + +// NnfPortManagerList contains a list of NnfPortManager +type NnfPortManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfPortManager `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfPortManager{}, &NnfPortManagerList{}) +} diff --git a/api/v1alpha2/nnfportmanager_webhook.go b/api/v1alpha2/nnfportmanager_webhook.go new file mode 100644 index 000000000..05e861241 --- /dev/null +++ b/api/v1alpha2/nnfportmanager_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package.
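The Allocations list in NnfPortManagerSpec above is the entire client contract: append an entry to request ports, remove the entry to release them, and read the matching entry in Status.Allocations for the result. A minimal client-side sketch of that round trip, assuming a controller-runtime client; the port manager's name and namespace here are hypothetical:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
)

// requestPorts adds one allocation entry asking for two ports, then checks
// Status.Allocations for the controller's answer. Entries must be unique, so
// a real caller would first verify it has no entry in the list already.
func requestPorts(ctx context.Context, c client.Client, requester corev1.ObjectReference) ([]uint16, error) {
	mgr := &nnfv1alpha2.NnfPortManager{}
	key := types.NamespacedName{Name: "nnf-port-manager", Namespace: "nnf-system"} // hypothetical
	if err := c.Get(ctx, key, mgr); err != nil {
		return nil, err
	}

	// Request: append to Spec.Allocations. Removing the entry later frees the ports.
	mgr.Spec.Allocations = append(mgr.Spec.Allocations, nnfv1alpha2.NnfPortManagerAllocationSpec{
		Requester: requester,
		Count:     2,
	})
	if err := c.Update(ctx, mgr); err != nil {
		return nil, err
	}

	// Response: the controller records one status entry per allocation spec.
	for _, alloc := range mgr.Status.Allocations {
		if alloc.Requester != nil && alloc.Requester.UID == requester.UID &&
			alloc.Status == nnfv1alpha2.NnfPortManagerAllocationStatusInUse {
			return alloc.Ports, nil
		}
	}
	return nil, nil // not allocated yet
}
```

Because the port manager controller satisfies allocations asynchronously, a real caller would requeue until the status entry appears rather than expecting ports on the first pass.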
+var nnfportmanagerlog = logf.Log.WithName("nnfportmanager-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfPortManager) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfportmanager_webhook_test.go b/api/v1alpha2/nnfportmanager_webhook_test.go new file mode 100644 index 000000000..8a34aee40 --- /dev/null +++ b/api/v1alpha2/nnfportmanager_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfPortManager Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfPortManager under Conversion Webhook", func() { + It("Should get the converted version of NnfPortManager", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfstorage_types.go b/api/v1alpha2/nnfstorage_types.go new file mode 100644 index 000000000..88ddca088 --- /dev/null +++ b/api/v1alpha2/nnfstorage_types.go @@ -0,0 +1,183 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha2 + +import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + AllocationSetLabel = "nnf.cray.hpe.com/allocationset" +) + +// NnfStorageAllocationNodes identifies the node and properties of the allocation to make on that node +type NnfStorageAllocationNodes struct { + // Name of the node to make the allocation on + Name string `json:"name"` + + // Number of allocations to make on this node + Count int `json:"count"` +} + +// NnfStorageLustreSpec defines the specifications for a Lustre filesystem +type NnfStorageLustreSpec struct { + // TargetType is the type of Lustre target to be created. + // +kubebuilder:validation:Enum=mgt;mdt;mgtmdt;ost + TargetType string `json:"targetType,omitempty"` + + // BackFs is the type of backing filesystem to use. + // +kubebuilder:validation:Enum=ldiskfs;zfs + BackFs string `json:"backFs,omitempty"` + + // MgsAddress is the NID of the MGS when a pre-existing MGS is + // provided in the NnfStorageProfile + MgsAddress string `json:"mgsAddress,omitempty"` + + // PersistentMgsReference is a reference to a persistent storage that is providing + // the external MGS. + PersistentMgsReference corev1.ObjectReference `json:"persistentMgsReference,omitempty"` +} + +// NnfStorageAllocationSetSpec defines the details for an allocation set +type NnfStorageAllocationSetSpec struct { + // Name is a human readable label for this set of allocations (e.g., xfs) + Name string `json:"name"` + + // Capacity defines the capacity, in bytes, of this storage specification. The NNF Node itself + // may split the storage among the available drives operating in the NNF Node. + Capacity int64 `json:"capacity"` + + // Lustre specific configuration + NnfStorageLustreSpec `json:",inline"` + + // SharedAllocation shares a single block storage allocation between multiple file system allocations + // (within the same workflow) on a Rabbit + SharedAllocation bool `json:"sharedAllocation"` + + // Nodes is the list of Rabbit nodes to make allocations on + Nodes []NnfStorageAllocationNodes `json:"nodes"` +} + +// NnfStorageSpec defines the specification for requesting generic storage on a set +// of available NNF Nodes. This object is related to a #DW for NNF Storage, with the WLM +// making the determination for which NNF Nodes it wants to utilize. +type NnfStorageSpec struct { + + // FileSystemType defines the type of the desired filesystem, or raw + // block device. + // +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre + // +kubebuilder:default:=raw + FileSystemType string `json:"fileSystemType,omitempty"` + + // User ID for file system + UserID uint32 `json:"userID"` + + // Group ID for file system + GroupID uint32 `json:"groupID"` + + // AllocationSets is a list of different types of storage allocations to make. Each + // AllocationSet describes an entire allocation spanning multiple Rabbits. For example, + // an AllocationSet could be all of the OSTs in a Lustre filesystem, or all of the raw + // block devices in a raw block configuration. 
+ AllocationSets []NnfStorageAllocationSetSpec `json:"allocationSets"` +} + +// NnfStorageAllocationSetStatus contains the status information for an allocation set +type NnfStorageAllocationSetStatus struct { + Ready bool `json:"ready,omitempty"` + + // AllocationCount is the total number of allocations that currently + // exist + AllocationCount int `json:"allocationCount"` +} + +type NnfStorageLustreStatus struct { + // MgsAddress is the NID of the MGS. + MgsAddress string `json:"mgsAddress,omitempty"` + + // FileSystemName is the fsname parameter for the Lustre filesystem. + // +kubebuilder:validation:MaxLength:=8 + FileSystemName string `json:"fileSystemName,omitempty"` + + // LustreMgtReference is an object reference to the NnfLustreMGT resource used + // by the NnfStorage + LustreMgtReference corev1.ObjectReference `json:"lustreMgtReference,omitempty"` +} + +// NnfStorageStatus defines the observed status of NNF Storage. +type NnfStorageStatus struct { + NnfStorageLustreStatus `json:",inline"` + + // AllocationSets holds the status information for each of the AllocationSets + // from the spec. + AllocationSets []NnfStorageAllocationSetStatus `json:"allocationSets,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` + + // Ready reflects the status of this NNF Storage + Ready bool `json:"ready,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" + +// NnfStorage is the Schema for the storages API +type NnfStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfStorageSpec `json:"spec,omitempty"` + Status NnfStorageStatus `json:"status,omitempty"` +} + +func (s *NnfStorage) GetStatus() updater.Status[*NnfStorageStatus] { + return &s.Status +} + +//+kubebuilder:object:root=true + +// NnfStorageList contains a list of NnfStorage +type NnfStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfStorage `json:"items"` +} + +func (n *NnfStorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&NnfStorage{}, &NnfStorageList{}) +} diff --git a/api/v1alpha2/nnfstorage_webhook.go b/api/v1alpha2/nnfstorage_webhook.go new file mode 100644 index 000000000..3e79cb717 --- /dev/null +++ b/api/v1alpha2/nnfstorage_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var nnfstoragelog = logf.Log.WithName("nnfstorage-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfStorage) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfstorage_webhook_test.go b/api/v1alpha2/nnfstorage_webhook_test.go new file mode 100644 index 000000000..9f15e2373 --- /dev/null +++ b/api/v1alpha2/nnfstorage_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfStorage Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfStorage under Conversion Webhook", func() { + It("Should get the converted version of NnfStorage", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha2/nnfstorageprofile_types.go b/api/v1alpha2/nnfstorageprofile_types.go new file mode 100644 index 000000000..31140f029 --- /dev/null +++ b/api/v1alpha2/nnfstorageprofile_types.go @@ -0,0 +1,311 @@ +/* + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NnfStorageProfileLustreCmdLines defines commandlines to use for mkfs, zpool, and other utilities +// for Lustre allocations. +type NnfStorageProfileLustreCmdLines struct { + // ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + // This is where you may specify zpool create options, and the virtual device (vdev) such as + // "mirror", or "draid". See zpoolconcepts(7). 
+ ZpoolCreate string `json:"zpoolCreate,omitempty"` + + // Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + // Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + // Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + Mkfs string `json:"mkfs,omitempty"` + + // MountTarget specifies the mount command line for the lustre target. + // For persistent mount options for lustre targets, do not use this field; use the --mountfsoptions + // argument to mkfs.lustre instead. + MountTarget string `json:"mountTarget,omitempty"` +} + +// NnfStorageProfileLustreMiscOptions defines options to use for the mount library, and other utilities. +type NnfStorageProfileLustreMiscOptions struct { + // ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + // to the compute nodes in a workflow + // +kubebuilder:default:=false + ColocateComputes bool `json:"colocateComputes"` + + // Count specifies how many Lustre targets to create + // +kubebuilder:validation:Minimum:=1 + Count int `json:"count,omitempty"` + + // Scale provides a unitless value to determine how many Lustre targets to create + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=10 + Scale int `json:"scale,omitempty"` + + // StorageLabels defines a list of labels that are added to the DirectiveBreakdown + // labels constraint. This restricts allocations to Storage resources with these labels + StorageLabels []string `json:"storageLabels,omitempty"` +} + +// NnfStorageProfileLustreData defines the Lustre-specific configuration +type NnfStorageProfileLustreData struct { + // CombinedMGTMDT indicates whether the MGT and MDT should be created on the same target device + // +kubebuilder:default:=false + CombinedMGTMDT bool `json:"combinedMgtMdt,omitempty"` + + // ExternalMGS specifies the use of an existing MGS rather than creating one. This can + // be either the NID(s) of a pre-existing MGS that should be used, or it can be an NNF Persistent + // Instance that was created with the "StandaloneMGTPoolName" option. In the latter case, the format + // is "pool:poolName" where "poolName" is the argument from "StandaloneMGTPoolName". A single MGS will + // be picked from the pool. + ExternalMGS string `json:"externalMgs,omitempty"` + + // CapacityMGT specifies the size of the MGT device. + // +kubebuilder:validation:Pattern:="^\\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$" + // +kubebuilder:default:="5GiB" + CapacityMGT string `json:"capacityMgt,omitempty"` + + // CapacityMDT specifies the size of the MDT device. This is also + // used for a combined MGT+MDT device. + // +kubebuilder:validation:Pattern:="^\\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$" + // +kubebuilder:default:="5GiB" + CapacityMDT string `json:"capacityMdt,omitempty"` + + // ExclusiveMDT indicates that the MDT should not be colocated with any other target on the chosen server. + // +kubebuilder:default:=false + ExclusiveMDT bool `json:"exclusiveMdt,omitempty"` + + // CapacityScalingFactor is a scaling factor for the OST capacity requested in the DirectiveBreakdown + // +kubebuilder:default:="1.0" + CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` + + // StandaloneMGTPoolName creates a Lustre MGT without an MDT or OST. This option can only be used when creating + // a persistent Lustre instance. The MGS is placed into a named pool that can be used by the "ExternalMGS" option. + // Multiple pools can be created.
+ StandaloneMGTPoolName string `json:"standaloneMgtPoolName,omitempty"` + + // MgtCmdLines contains commands to create an MGT target. + MgtCmdLines NnfStorageProfileLustreCmdLines `json:"mgtCommandlines,omitempty"` + + // MdtCmdLines contains commands to create an MDT target. + MdtCmdLines NnfStorageProfileLustreCmdLines `json:"mdtCommandlines,omitempty"` + + // MgtMdtCmdLines contains commands to create a combined MGT/MDT target. + MgtMdtCmdLines NnfStorageProfileLustreCmdLines `json:"mgtMdtCommandlines,omitempty"` + + // OstCmdLines contains commands to create an OST target. + OstCmdLines NnfStorageProfileLustreCmdLines `json:"ostCommandlines,omitempty"` + + // MgtOptions contains options to use for libraries used for an MGT target. + MgtOptions NnfStorageProfileLustreMiscOptions `json:"mgtOptions,omitempty"` + + // MdtOptions contains options to use for libraries used for an MDT target. + MdtOptions NnfStorageProfileLustreMiscOptions `json:"mdtOptions,omitempty"` + + // MgtMdtOptions contains options to use for libraries used for a combined MGT/MDT target. + MgtMdtOptions NnfStorageProfileLustreMiscOptions `json:"mgtMdtOptions,omitempty"` + + // OstOptions contains options to use for libraries used for an OST target. + OstOptions NnfStorageProfileLustreMiscOptions `json:"ostOptions,omitempty"` + + // MountRabbit specifies mount options for making the Lustre client mount on the Rabbit. + MountRabbit string `json:"mountRabbit,omitempty"` + + // MountCompute specifies mount options for making the Lustre client mount on the Compute. + MountCompute string `json:"mountCompute,omitempty"` +} + +// NnfStorageProfileCmdLines defines commandlines to use for mkfs, and other utilities for storage +// allocations that use LVM and a simple file system type (e.g., gfs2) +type NnfStorageProfileCmdLines struct { + // Mkfs specifies the mkfs commandline, minus the "mkfs". + Mkfs string `json:"mkfs,omitempty"` + + // SharedVg specifies that allocations from a workflow on the same Rabbit should share an + // LVM VolumeGroup + // +kubebuilder:default:=false + SharedVg bool `json:"sharedVg,omitempty"` + + // PvCreate specifies the pvcreate commandline, minus the "pvcreate". + PvCreate string `json:"pvCreate,omitempty"` + + // PvRemove specifies the pvremove commandline, minus the "pvremove". + PvRemove string `json:"pvRemove,omitempty"` + + // VgCreate specifies the vgcreate commandline, minus the "vgcreate". + VgCreate string `json:"vgCreate,omitempty"` + + // VgChange specifies the various vgchange commandlines, minus the "vgchange". + VgChange NnfStorageProfileLVMVgChangeCmdLines `json:"vgChange,omitempty"` + + // VgRemove specifies the vgremove commandline, minus the "vgremove". + VgRemove string `json:"vgRemove,omitempty"` + + // LvCreate specifies the lvcreate commandline, minus the "lvcreate". + LvCreate string `json:"lvCreate,omitempty"` + + // LvChange specifies the various lvchange commandlines, minus the "lvchange". + LvChange NnfStorageProfileLVMLvChangeCmdLines `json:"lvChange,omitempty"` + + // LvRemove specifies the lvremove commandline, minus the "lvremove". + LvRemove string `json:"lvRemove,omitempty"` + + // MountRabbit specifies mount options for mounting on the Rabbit. + MountRabbit string `json:"mountRabbit,omitempty"` + + // MountCompute specifies mount options for mounting on the Compute.
+ MountCompute string `json:"mountCompute,omitempty"` +} + +// NnfStorageProfileLVMVgChangeCmdLines defines the vgchange commandlines used for LVM volume groups +type NnfStorageProfileLVMVgChangeCmdLines struct { + // The vgchange commandline for lockStart, minus the "vgchange" command + LockStart string `json:"lockStart,omitempty"` + + // The vgchange commandline for lockStop, minus the "vgchange" command + LockStop string `json:"lockStop,omitempty"` +} + +// NnfStorageProfileLVMLvChangeCmdLines defines the lvchange commandlines used for LVM logical volumes +type NnfStorageProfileLVMLvChangeCmdLines struct { + // The lvchange commandline for activate, minus the "lvchange" command + Activate string `json:"activate,omitempty"` + + // The lvchange commandline for deactivate, minus the "lvchange" command + Deactivate string `json:"deactivate,omitempty"` +} + +// NnfStorageProfileGFS2Data defines the GFS2-specific configuration +type NnfStorageProfileGFS2Data struct { + // CmdLines contains commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` + + // StorageLabels defines a list of labels that are added to the DirectiveBreakdown + // labels constraint. This restricts allocations to Storage resources with these labels + StorageLabels []string `json:"storageLabels,omitempty"` + + // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown + // +kubebuilder:default:="1.0" + CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` +} + +// NnfStorageProfileXFSData defines the XFS-specific configuration +type NnfStorageProfileXFSData struct { + // CmdLines contains commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` + + // StorageLabels defines a list of labels that are added to the DirectiveBreakdown + // labels constraint. This restricts allocations to Storage resources with these labels + StorageLabels []string `json:"storageLabels,omitempty"` + + // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown + // +kubebuilder:default:="1.0" + CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` +} + +// NnfStorageProfileRawData defines the Raw-specific configuration +type NnfStorageProfileRawData struct { + // CmdLines contains commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` + + // StorageLabels defines a list of labels that are added to the DirectiveBreakdown + // labels constraint.
This restricts allocations to Storage resources with these labels + StorageLabels []string `json:"storageLabels,omitempty"` + + // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown + // +kubebuilder:default:="1.0" + CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` +} + +// NnfStorageProfileData defines the desired state of NnfStorageProfile +type NnfStorageProfileData struct { + + // Default is true if this instance is the default resource to use + // +kubebuilder:default:=false + Default bool `json:"default,omitempty"` + + // Pinned is true if this instance is an immutable copy + // +kubebuilder:default:=false + Pinned bool `json:"pinned,omitempty"` + + // LustreStorage defines the Lustre-specific configuration + LustreStorage NnfStorageProfileLustreData `json:"lustreStorage"` + + // GFS2Storage defines the GFS2-specific configuration + GFS2Storage NnfStorageProfileGFS2Data `json:"gfs2Storage"` + + // XFSStorage defines the XFS-specific configuration + XFSStorage NnfStorageProfileXFSData `json:"xfsStorage"` + + // RawStorage defines the Raw-specific configuration + RawStorage NnfStorageProfileRawData `json:"rawStorage"` +} + +//+kubebuilder:object:root=true +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="DEFAULT",type="boolean",JSONPath=".data.default",description="True if this is the default instance" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// NnfStorageProfile is the Schema for the nnfstorageprofiles API +type NnfStorageProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Data NnfStorageProfileData `json:"data,omitempty"` +} + +//+kubebuilder:object:root=true +// +kubebuilder:storageversion + +// NnfStorageProfileList contains a list of NnfStorageProfile +type NnfStorageProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfStorageProfile `json:"items"` +} + +func (n *NnfStorageProfile) GetLustreMiscOptions(target string) NnfStorageProfileLustreMiscOptions { + switch target { + case "mgt": + return n.Data.LustreStorage.MgtOptions + case "mdt": + return n.Data.LustreStorage.MdtOptions + case "mgtmdt": + return n.Data.LustreStorage.MgtMdtOptions + case "ost": + return n.Data.LustreStorage.OstOptions + default: + panic("Invalid target type") + } +} + +func (n *NnfStorageProfileList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&NnfStorageProfile{}, &NnfStorageProfileList{}) +} diff --git a/api/v1alpha1/nnfstorageprofile_webhook.go b/api/v1alpha2/nnfstorageprofile_webhook.go similarity index 84% rename from api/v1alpha1/nnfstorageprofile_webhook.go rename to api/v1alpha2/nnfstorageprofile_webhook.go index 6e8cb43fa..05eb02d50 100644 --- a/api/v1alpha1/nnfstorageprofile_webhook.go +++ b/api/v1alpha2/nnfstorageprofile_webhook.go @@ -17,13 +17,12 @@ * limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( "fmt" "os" "reflect" - "strings" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" @@ -44,7 +43,7 @@ func (r *NnfStorageProfile) SetupWebhookWithManager(mgr ctrl.Manager) error { // NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. 
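Before the validation webhook below, a sketch of how the command-line sections above compose into a profile. This builds a GFS2 profile under stated assumptions: the $UPPERCASE tokens stand for variables the nnf-sos controllers substitute at allocation time, and both the variable names and the option strings are illustrative, not a reference for real deployments:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
)

// exampleGFS2Profile fills in only the GFS2 section; the Lustre, XFS, and raw
// sections follow the same shape. Command lines omit the utility name itself,
// as the field comments above describe (e.g. PvCreate omits "pvcreate").
func exampleGFS2Profile() *nnfv1alpha2.NnfStorageProfile {
	return &nnfv1alpha2.NnfStorageProfile{
		ObjectMeta: metav1.ObjectMeta{Name: "gfs2-example", Namespace: "nnf-system"},
		Data: nnfv1alpha2.NnfStorageProfileData{
			GFS2Storage: nnfv1alpha2.NnfStorageProfileGFS2Data{
				CmdLines: nnfv1alpha2.NnfStorageProfileCmdLines{
					PvCreate:    "$DEVICE",                                        // placeholder variable
					VgCreate:    "$VG_NAME $DEVICE_LIST",                          // placeholder variables
					LvCreate:    "--extents 100%VG --name $LV_NAME $VG_NAME",      // illustrative options
					Mkfs:        "-j2 -p lock_dlm -t $CLUSTER_NAME:$LOCK_SPACE $DEVICE",
					MountRabbit: "-o relatime",
				},
				CapacityScalingFactor: "1.0",
			},
		},
	}
}
```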
// Modifying the path to an invalid value can cause API server errors when it fails to locate the webhook. -//+kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha1-nnfstorageprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=create;update,versions=v1alpha1,name=vnnfstorageprofile.kb.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha2-nnfstorageprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=create;update,versions=v1alpha2,name=vnnfstorageprofile.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &NnfStorageProfile{} @@ -143,34 +142,3 @@ func (r *NnfStorageProfile) validateLustreTargetMiscOptions(targetMiscOptions Nn return nil } - -type VarHandler struct { - VarMap map[string]string -} - -func NewVarHandler(vars map[string]string) *VarHandler { - v := &VarHandler{} - v.VarMap = vars - return v -} - -// ListToVars splits the value of one of its variables, and creates a new -// indexed variable for each of the items in the split. -func (v *VarHandler) ListToVars(listVarName, newVarPrefix string) error { - theList, ok := v.VarMap[listVarName] - if !ok { - return fmt.Errorf("Unable to find the variable named %s", listVarName) - } - - for i, val := range strings.Split(theList, " ") { - v.VarMap[fmt.Sprintf("%s%d", newVarPrefix, i+1)] = val - } - return nil -} - -func (v *VarHandler) ReplaceAll(s string) string { - for key, value := range v.VarMap { - s = strings.ReplaceAll(s, key, value) - } - return s -} diff --git a/api/v1alpha1/nnfstorageprofile_webhook_test.go b/api/v1alpha2/nnfstorageprofile_webhook_test.go similarity index 99% rename from api/v1alpha1/nnfstorageprofile_webhook_test.go rename to api/v1alpha2/nnfstorageprofile_webhook_test.go index 1841ae4ab..4c952aff7 100644 --- a/api/v1alpha1/nnfstorageprofile_webhook_test.go +++ b/api/v1alpha2/nnfstorageprofile_webhook_test.go @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 Hewlett Packard Enterprise Development LP + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( "context" diff --git a/api/v1alpha2/nnfsystemstorage_types.go b/api/v1alpha2/nnfsystemstorage_types.go new file mode 100644 index 000000000..6b114894d --- /dev/null +++ b/api/v1alpha2/nnfsystemstorage_types.go @@ -0,0 +1,142 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package v1alpha2 + +import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type NnfSystemStorageComputesTarget string + +const ( + ComputesTargetAll NnfSystemStorageComputesTarget = "all" + ComputesTargetEven NnfSystemStorageComputesTarget = "even" + ComputesTargetOdd NnfSystemStorageComputesTarget = "odd" + ComputesTargetPattern NnfSystemStorageComputesTarget = "pattern" +) + +// NnfSystemStorageSpec defines the desired state of NnfSystemStorage +type NnfSystemStorageSpec struct { + // SystemConfiguration is an object reference to the SystemConfiguration resource to use. If this + // field is empty, the resource named "default" in the "default" namespace is used. + SystemConfiguration corev1.ObjectReference `json:"systemConfiguration,omitempty"` + + // ExcludeRabbits is a list of Rabbits to exclude from the Rabbits in the SystemConfiguration + ExcludeRabbits []string `json:"excludeRabbits,omitempty"` + + // IncludeRabbits is a list of Rabbits to use rather than getting the list of Rabbits from the + // SystemConfiguration + IncludeRabbits []string `json:"includeRabbits,omitempty"` + + // ExcludeDisabledRabbits looks at the Storage resource for a Rabbit and does not use it if it's + // marked as "disabled" + // +kubebuilder:default:=false + ExcludeDisabledRabbits bool `json:"excludeDisabledRabbits,omitempty"` + + // ExcludeComputes is a list of compute nodes to exclude from the compute nodes listed in the + // SystemConfiguration + ExcludeComputes []string `json:"excludeComputes,omitempty"` + + // IncludeComputes is a list of compute nodes to use rather than getting the list of compute nodes + // from the SystemConfiguration + IncludeComputes []string `json:"includeComputes,omitempty"` + + // ComputesTarget specifies which computes to make the storage accessible to + // +kubebuilder:validation:Enum=all;even;odd;pattern + // +kubebuilder:default:=all + ComputesTarget NnfSystemStorageComputesTarget `json:"computesTarget,omitempty"` + + // ComputesPattern is a list of compute node indexes (0-15) to make the storage accessible to.
This + // is only used if ComputesTarget is "pattern" + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:items:Maximum=15 + // +kubebuilder:validation:items:Minimum=0 + ComputesPattern []int `json:"computesPattern,omitempty"` + + // Capacity is the allocation size on each Rabbit + // +kubebuilder:default:=1073741824 + Capacity int64 `json:"capacity"` + + // Type is the file system type to use for the storage allocation + // +kubebuilder:validation:Enum=raw;xfs;gfs2 + // +kubebuilder:default:=raw + Type string `json:"type,omitempty"` + + // StorageProfile is an object reference to the storage profile to use + StorageProfile corev1.ObjectReference `json:"storageProfile"` + + // MakeClientMounts specifies whether to make ClientMount resources or just + // make the devices available to the client + // +kubebuilder:default:=false + MakeClientMounts bool `json:"makeClientMounts"` + + // ClientMountPath is an optional path for where to mount the file system on the computes + ClientMountPath string `json:"clientMountPath,omitempty"` +} + +// NnfSystemStorageStatus defines the observed state of NnfSystemStorage +type NnfSystemStorageStatus struct { + // Ready signifies whether all work has been completed + Ready bool `json:"ready"` + + dwsv1alpha2.ResourceError `json:",inline"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// NnfSystemStorage is the Schema for the nnfsystemstorages API +type NnfSystemStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfSystemStorageSpec `json:"spec,omitempty"` + Status NnfSystemStorageStatus `json:"status,omitempty"` +} + +func (a *NnfSystemStorage) GetStatus() updater.Status[*NnfSystemStorageStatus] { + return &a.Status +} + +// +kubebuilder:object:root=true +// NnfSystemStorageList contains a list of NnfSystemStorage +type NnfSystemStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfSystemStorage `json:"items"` +} + +func (n *NnfSystemStorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&NnfSystemStorage{}, &NnfSystemStorageList{}) +} diff --git a/api/v1alpha2/nnfsystemstorage_webhook.go b/api/v1alpha2/nnfsystemstorage_webhook.go new file mode 100644 index 000000000..31be8165f --- /dev/null +++ b/api/v1alpha2/nnfsystemstorage_webhook.go @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. 
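A short sketch of how the NnfSystemStorage spec fields above combine, assuming the v1alpha2 types; every name in it is hypothetical. It asks for a raw allocation on every Rabbit except two, exposed only to even-numbered computes:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
)

// exampleSystemStorage selects Rabbits by exclusion and computes by parity,
// per the spec fields above. Capacity is per-Rabbit, in bytes.
func exampleSystemStorage() *nnfv1alpha2.NnfSystemStorage {
	return &nnfv1alpha2.NnfSystemStorage{
		ObjectMeta: metav1.ObjectMeta{Name: "scratch-system", Namespace: "default"},
		Spec: nnfv1alpha2.NnfSystemStorageSpec{
			ExcludeRabbits: []string{"rabbit-12", "rabbit-13"}, // hypothetical node names
			ComputesTarget: nnfv1alpha2.ComputesTargetEven,
			Capacity:       1073741824, // 1 GiB on each selected Rabbit
			Type:           "raw",
			StorageProfile: corev1.ObjectReference{
				Kind:      "NnfStorageProfile",
				Name:      "default",
				Namespace: "nnf-system",
			},
			MakeClientMounts: false, // expose devices without ClientMount resources
		},
	}
}
```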
+var nnfsystemstoragelog = logf.Log.WithName("nnfsystemstorage-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *NnfSystemStorage) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/api/v1alpha2/nnfsystemstorage_webhook_test.go b/api/v1alpha2/nnfsystemstorage_webhook_test.go new file mode 100644 index 000000000..a21cfd663 --- /dev/null +++ b/api/v1alpha2/nnfsystemstorage_webhook_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("NnfSystemStorage Webhook", func() { + + // We already have api//conversion_test.go that is + // digging deep into the conversion routines, and we have + // internal/controllers/conversion_test.go that is verifying that the + // conversion webhook is hooked up to those routines. + + Context("When creating NnfSystemStorage under Conversion Webhook", func() { + It("Should get the converted version of NnfSystemStorage", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha2/webhook_suite_test.go similarity index 99% rename from api/v1alpha1/webhook_suite_test.go rename to api/v1alpha2/webhook_suite_test.go index 24967f768..21d46d6fb 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha2/webhook_suite_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( "context" diff --git a/api/v1alpha2/workflow_helpers.go b/api/v1alpha2/workflow_helpers.go new file mode 100644 index 000000000..6c230cfce --- /dev/null +++ b/api/v1alpha2/workflow_helpers.go @@ -0,0 +1,73 @@ +/* + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha2 + +const ( + // DirectiveIndexLabel is a label applied to child objects of the workflow + // to show which directive they were created for. 
This is useful during deletion + // to filter the child objects by the directive index and only delete the + // resources for the directive being processed. + DirectiveIndexLabel = "nnf.cray.hpe.com/directive_index" + + // TargetDirectiveIndexLabel is used for ClientMount resources to indicate the + // directive index of the storage they're targeting. + TargetDirectiveIndexLabel = "nnf.cray.hpe.com/target_directive_index" + + // TargetOwnerUidLabel is used for ClientMount resources to indicate the UID of the + // parent NnfStorage it's targeting + TargetOwnerUidLabel = "nnf.cray.hpe.com/target_owner_uid" + + // PinnedStorageProfileLabelName is a label applied to NnfStorage objects to show + // which pinned storage profile is being used. + PinnedStorageProfileLabelName = "nnf.cray.hpe.com/pinned_storage_profile_name" + + // PinnedStorageProfileLabelNameSpace is a label applied to NnfStorage objects to show + // the namespace of the pinned storage profile being used. + PinnedStorageProfileLabelNameSpace = "nnf.cray.hpe.com/pinned_storage_profile_namespace" + + // PinnedContainerProfileLabelName is a label applied to NnfStorage objects to show + // which pinned container profile is being used. + PinnedContainerProfileLabelName = "nnf.cray.hpe.com/pinned_container_profile_name" + + // PinnedContainerProfileLabelNameSpace is a label applied to NnfStorage objects to show + // the namespace of the pinned container profile being used. + PinnedContainerProfileLabelNameSpace = "nnf.cray.hpe.com/pinned_container_profile_namespace" + + // StandaloneMGTLabel is a label applied to the PersistentStorageInstance to show that + // it is for a Lustre MGT only. The value for the label is the pool name. + StandaloneMGTLabel = "nnf.cray.hpe.com/standalone_mgt" + + // RabbitNodeSelectorLabel is a label applied to each k8s Node that is a Rabbit. + // It is used for scheduling NLCs onto the rabbits. + // (This is left in its legacy form because so many existing services are + // using it in their nodeSelector.) + RabbitNodeSelectorLabel = "cray.nnf.node" + + // TaintsAndLabelsCompletedLabel is a label applied to each k8s Node that is a Rabbit. + // It is used to indicate that the node has completed the process of applying + // the taints and labels that mark it as a rabbit. + TaintsAndLabelsCompletedLabel = "nnf.cray.hpe.com/taints_and_labels_completed" + + // RabbitNodeTaintKey is a taint key applied to each k8s Node that is a Rabbit. + // It is used for scheduling NLCs onto the rabbits. + // (This is left in its legacy form to avoid having existing clusters, + // which already have this taint, grind to a halt.) + RabbitNodeTaintKey = "cray.nnf.node" +) diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..29ca4f87e --- /dev/null +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,2022 @@ +//go:build !ignore_autogenerated + +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LustreStorageSpec) DeepCopyInto(out *LustreStorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreStorageSpec. +func (in *LustreStorageSpec) DeepCopy() *LustreStorageSpec { + if in == nil { + return nil + } + out := new(LustreStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfAccess) DeepCopyInto(out *NnfAccess) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccess. +func (in *NnfAccess) DeepCopy() *NnfAccess { + if in == nil { + return nil + } + out := new(NnfAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfAccess) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfAccessList) DeepCopyInto(out *NnfAccessList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfAccess, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessList. +func (in *NnfAccessList) DeepCopy() *NnfAccessList { + if in == nil { + return nil + } + out := new(NnfAccessList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfAccessList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfAccessSpec) DeepCopyInto(out *NnfAccessSpec) { + *out = *in + out.ClientReference = in.ClientReference + out.StorageReference = in.StorageReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessSpec. +func (in *NnfAccessSpec) DeepCopy() *NnfAccessSpec { + if in == nil { + return nil + } + out := new(NnfAccessSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfAccessStatus) DeepCopyInto(out *NnfAccessStatus) { + *out = *in + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessStatus. 
+func (in *NnfAccessStatus) DeepCopy() *NnfAccessStatus { + if in == nil { + return nil + } + out := new(NnfAccessStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfile) DeepCopyInto(out *NnfContainerProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfile. +func (in *NnfContainerProfile) DeepCopy() *NnfContainerProfile { + if in == nil { + return nil + } + out := new(NnfContainerProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfContainerProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfileData) DeepCopyInto(out *NnfContainerProfileData) { + *out = *in + if in.Storages != nil { + in, out := &in.Storages, &out.Storages + *out = make([]NnfContainerProfileStorage, len(*in)) + copy(*out, *in) + } + if in.PreRunTimeoutSeconds != nil { + in, out := &in.PreRunTimeoutSeconds, &out.PreRunTimeoutSeconds + *out = new(int64) + **out = **in + } + if in.PostRunTimeoutSeconds != nil { + in, out := &in.PostRunTimeoutSeconds, &out.PostRunTimeoutSeconds + *out = new(int64) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(uint32) + **out = **in + } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(uint32) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(v1.PodSpec) + (*in).DeepCopyInto(*out) + } + if in.MPISpec != nil { + in, out := &in.MPISpec, &out.MPISpec + *out = new(v2beta1.MPIJobSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileData. +func (in *NnfContainerProfileData) DeepCopy() *NnfContainerProfileData { + if in == nil { + return nil + } + out := new(NnfContainerProfileData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfileList) DeepCopyInto(out *NnfContainerProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfContainerProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileList. +func (in *NnfContainerProfileList) DeepCopy() *NnfContainerProfileList { + if in == nil { + return nil + } + out := new(NnfContainerProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfContainerProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfContainerProfileStorage) DeepCopyInto(out *NnfContainerProfileStorage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileStorage. +func (in *NnfContainerProfileStorage) DeepCopy() *NnfContainerProfileStorage { + if in == nil { + return nil + } + out := new(NnfContainerProfileStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovement) DeepCopyInto(out *NnfDataMovement) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovement. +func (in *NnfDataMovement) DeepCopy() *NnfDataMovement { + if in == nil { + return nil + } + out := new(NnfDataMovement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovement) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementCommandStatus) DeepCopyInto(out *NnfDataMovementCommandStatus) { + *out = *in + out.ElapsedTime = in.ElapsedTime + if in.ProgressPercentage != nil { + in, out := &in.ProgressPercentage, &out.ProgressPercentage + *out = new(int32) + **out = **in + } + in.LastMessageTime.DeepCopyInto(&out.LastMessageTime) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(int32) + **out = **in + } + if in.Directories != nil { + in, out := &in.Directories, &out.Directories + *out = new(int32) + **out = **in + } + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = new(int32) + **out = **in + } + if in.Links != nil { + in, out := &in.Links, &out.Links + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementCommandStatus. +func (in *NnfDataMovementCommandStatus) DeepCopy() *NnfDataMovementCommandStatus { + if in == nil { + return nil + } + out := new(NnfDataMovementCommandStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementConfig) DeepCopyInto(out *NnfDataMovementConfig) { + *out = *in + if in.Slots != nil { + in, out := &in.Slots, &out.Slots + *out = new(int) + **out = **in + } + if in.MaxSlots != nil { + in, out := &in.MaxSlots, &out.MaxSlots + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementConfig. +func (in *NnfDataMovementConfig) DeepCopy() *NnfDataMovementConfig { + if in == nil { + return nil + } + out := new(NnfDataMovementConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfDataMovementList) DeepCopyInto(out *NnfDataMovementList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfDataMovement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementList. +func (in *NnfDataMovementList) DeepCopy() *NnfDataMovementList { + if in == nil { + return nil + } + out := new(NnfDataMovementList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementManager) DeepCopyInto(out *NnfDataMovementManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManager. +func (in *NnfDataMovementManager) DeepCopy() *NnfDataMovementManager { + if in == nil { + return nil + } + out := new(NnfDataMovementManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementManagerList) DeepCopyInto(out *NnfDataMovementManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfDataMovementManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerList. +func (in *NnfDataMovementManagerList) DeepCopy() *NnfDataMovementManagerList { + if in == nil { + return nil + } + out := new(NnfDataMovementManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementManagerSpec) DeepCopyInto(out *NnfDataMovementManagerSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerSpec. +func (in *NnfDataMovementManagerSpec) DeepCopy() *NnfDataMovementManagerSpec { + if in == nil { + return nil + } + out := new(NnfDataMovementManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *NnfDataMovementManagerStatus) DeepCopyInto(out *NnfDataMovementManagerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerStatus. +func (in *NnfDataMovementManagerStatus) DeepCopy() *NnfDataMovementManagerStatus { + if in == nil { + return nil + } + out := new(NnfDataMovementManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementProfile) DeepCopyInto(out *NnfDataMovementProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Data = in.Data +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfile. +func (in *NnfDataMovementProfile) DeepCopy() *NnfDataMovementProfile { + if in == nil { + return nil + } + out := new(NnfDataMovementProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementProfileData) DeepCopyInto(out *NnfDataMovementProfileData) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileData. +func (in *NnfDataMovementProfileData) DeepCopy() *NnfDataMovementProfileData { + if in == nil { + return nil + } + out := new(NnfDataMovementProfileData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementProfileList) DeepCopyInto(out *NnfDataMovementProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfDataMovementProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileList. +func (in *NnfDataMovementProfileList) DeepCopy() *NnfDataMovementProfileList { + if in == nil { + return nil + } + out := new(NnfDataMovementProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfDataMovementSpec) DeepCopyInto(out *NnfDataMovementSpec) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(NnfDataMovementSpecSourceDestination) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(NnfDataMovementSpecSourceDestination) + **out = **in + } + out.ProfileReference = in.ProfileReference + if in.UserConfig != nil { + in, out := &in.UserConfig, &out.UserConfig + *out = new(NnfDataMovementConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementSpec. +func (in *NnfDataMovementSpec) DeepCopy() *NnfDataMovementSpec { + if in == nil { + return nil + } + out := new(NnfDataMovementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementSpecSourceDestination) DeepCopyInto(out *NnfDataMovementSpecSourceDestination) { + *out = *in + out.StorageReference = in.StorageReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementSpecSourceDestination. +func (in *NnfDataMovementSpecSourceDestination) DeepCopy() *NnfDataMovementSpecSourceDestination { + if in == nil { + return nil + } + out := new(NnfDataMovementSpecSourceDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementStatus) DeepCopyInto(out *NnfDataMovementStatus) { + *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = (*in).DeepCopy() + } + if in.CommandStatus != nil { + in, out := &in.CommandStatus, &out.CommandStatus + *out = new(NnfDataMovementCommandStatus) + (*in).DeepCopyInto(*out) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementStatus. +func (in *NnfDataMovementStatus) DeepCopy() *NnfDataMovementStatus { + if in == nil { + return nil + } + out := new(NnfDataMovementStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDriveStatus) DeepCopyInto(out *NnfDriveStatus) { + *out = *in + out.NnfResourceStatus = in.NnfResourceStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDriveStatus. +func (in *NnfDriveStatus) DeepCopy() *NnfDriveStatus { + if in == nil { + return nil + } + out := new(NnfDriveStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGT) DeepCopyInto(out *NnfLustreMGT) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGT. 
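
The generated copiers above follow a small, mechanical rule set: `*out = *in` covers every value field, pointer fields such as `Source`, `Destination`, and `UserConfig` are re-pointed at fresh allocations, and anything with its own reference semantics delegates to a nested `DeepCopyInto`. A minimal hand-written sketch of the pointer rule, using hypothetical `Endpoint`/`Spec` types rather than anything from this API:

```go
package example

// Endpoint and Spec are hypothetical types that only illustrate the
// pattern controller-gen emits; they are not part of this API.
type Endpoint struct {
	Path string
}

type Spec struct {
	Source *Endpoint // pointer field: needs a fresh allocation
	Label  string    // value field: covered by the struct assignment
}

// DeepCopyInto mirrors the generated shape: *out = *in copies all
// value fields, then each pointer field is re-pointed at a new object
// so the copy cannot alias the original.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.Source != nil {
		in, out := &in.Source, &out.Source
		*out = new(Endpoint)
		**out = **in
	}
}
```

The plain `**out = **in` is only safe because `Endpoint` holds no references of its own; when the pointee does, the generator emits `(*in).DeepCopyInto(*out)` instead, as it does for `UserConfig` above.
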
+func (in *NnfLustreMGT) DeepCopy() *NnfLustreMGT { + if in == nil { + return nil + } + out := new(NnfLustreMGT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfLustreMGT) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGTList) DeepCopyInto(out *NnfLustreMGTList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfLustreMGT, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTList. +func (in *NnfLustreMGTList) DeepCopy() *NnfLustreMGTList { + if in == nil { + return nil + } + out := new(NnfLustreMGTList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfLustreMGTList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGTSpec) DeepCopyInto(out *NnfLustreMGTSpec) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FsNameBlackList != nil { + in, out := &in.FsNameBlackList, &out.FsNameBlackList + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.FsNameStartReference = in.FsNameStartReference + if in.ClaimList != nil { + in, out := &in.ClaimList, &out.ClaimList + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTSpec. +func (in *NnfLustreMGTSpec) DeepCopy() *NnfLustreMGTSpec { + if in == nil { + return nil + } + out := new(NnfLustreMGTSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGTStatus) DeepCopyInto(out *NnfLustreMGTStatus) { + *out = *in + if in.ClaimList != nil { + in, out := &in.ClaimList, &out.ClaimList + *out = make([]NnfLustreMGTStatusClaim, len(*in)) + copy(*out, *in) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTStatus. +func (in *NnfLustreMGTStatus) DeepCopy() *NnfLustreMGTStatus { + if in == nil { + return nil + } + out := new(NnfLustreMGTStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGTStatusClaim) DeepCopyInto(out *NnfLustreMGTStatusClaim) { + *out = *in + out.Reference = in.Reference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTStatusClaim. 
+func (in *NnfLustreMGTStatusClaim) DeepCopy() *NnfLustreMGTStatusClaim { + if in == nil { + return nil + } + out := new(NnfLustreMGTStatusClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNode) DeepCopyInto(out *NnfNode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNode. +func (in *NnfNode) DeepCopy() *NnfNode { + if in == nil { + return nil + } + out := new(NnfNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorage) DeepCopyInto(out *NnfNodeBlockStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorage. +func (in *NnfNodeBlockStorage) DeepCopy() *NnfNodeBlockStorage { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeBlockStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageAccessStatus) DeepCopyInto(out *NnfNodeBlockStorageAccessStatus) { + *out = *in + if in.DevicePaths != nil { + in, out := &in.DevicePaths, &out.DevicePaths + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAccessStatus. +func (in *NnfNodeBlockStorageAccessStatus) DeepCopy() *NnfNodeBlockStorageAccessStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAccessStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageAllocationSpec) DeepCopyInto(out *NnfNodeBlockStorageAllocationSpec) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAllocationSpec. +func (in *NnfNodeBlockStorageAllocationSpec) DeepCopy() *NnfNodeBlockStorageAllocationSpec { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAllocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfNodeBlockStorageAllocationStatus) DeepCopyInto(out *NnfNodeBlockStorageAllocationStatus) { + *out = *in + if in.Accesses != nil { + in, out := &in.Accesses, &out.Accesses + *out = make(map[string]NnfNodeBlockStorageAccessStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]NnfNodeBlockStorageDeviceStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAllocationStatus. +func (in *NnfNodeBlockStorageAllocationStatus) DeepCopy() *NnfNodeBlockStorageAllocationStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAllocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageDeviceStatus) DeepCopyInto(out *NnfNodeBlockStorageDeviceStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageDeviceStatus. +func (in *NnfNodeBlockStorageDeviceStatus) DeepCopy() *NnfNodeBlockStorageDeviceStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageDeviceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageList) DeepCopyInto(out *NnfNodeBlockStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeBlockStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageList. +func (in *NnfNodeBlockStorageList) DeepCopy() *NnfNodeBlockStorageList { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeBlockStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageSpec) DeepCopyInto(out *NnfNodeBlockStorageSpec) { + *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfNodeBlockStorageAllocationSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageSpec. +func (in *NnfNodeBlockStorageSpec) DeepCopy() *NnfNodeBlockStorageSpec { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
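
Collections follow the same static analysis: a slice of plain value structs is duplicated with `make` plus `copy`, while a map (or slice) whose elements themselves hold references is copied element by element, as `Accesses` is above. A sketch under hypothetical `AccessStatus`/`DeviceStatus` types:

```go
package example

// Hypothetical status types illustrating the two collection rules.
type AccessStatus struct {
	DevicePaths []string // reference field inside the element
}

func (in *AccessStatus) DeepCopy() *AccessStatus {
	out := new(AccessStatus)
	if in.DevicePaths != nil {
		out.DevicePaths = make([]string, len(in.DevicePaths))
		copy(out.DevicePaths, in.DevicePaths)
	}
	return out
}

type DeviceStatus struct {
	Name string // value fields only
}

type AllocationStatus struct {
	Accesses map[string]AccessStatus
	Devices  []DeviceStatus
}

func (in *AllocationStatus) DeepCopyInto(out *AllocationStatus) {
	*out = *in
	if in.Accesses != nil {
		out.Accesses = make(map[string]AccessStatus, len(in.Accesses))
		for key, val := range in.Accesses {
			// each element holds a slice, so it is deep-copied
			out.Accesses[key] = *val.DeepCopy()
		}
	}
	if in.Devices != nil {
		out.Devices = make([]DeviceStatus, len(in.Devices))
		copy(out.Devices, in.Devices) // plain values: copy() is enough
	}
}
```
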
+func (in *NnfNodeBlockStorageStatus) DeepCopyInto(out *NnfNodeBlockStorageStatus) { + *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfNodeBlockStorageAllocationStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ResourceError.DeepCopyInto(&out.ResourceError) + in.PodStartTime.DeepCopyInto(&out.PodStartTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageStatus. +func (in *NnfNodeBlockStorageStatus) DeepCopy() *NnfNodeBlockStorageStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeECData) DeepCopyInto(out *NnfNodeECData) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECData. +func (in *NnfNodeECData) DeepCopy() *NnfNodeECData { + if in == nil { + return nil + } + out := new(NnfNodeECData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeECData) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeECDataList) DeepCopyInto(out *NnfNodeECDataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeECData, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataList. +func (in *NnfNodeECDataList) DeepCopy() *NnfNodeECDataList { + if in == nil { + return nil + } + out := new(NnfNodeECDataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeECDataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeECDataSpec) DeepCopyInto(out *NnfNodeECDataSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataSpec. +func (in *NnfNodeECDataSpec) DeepCopy() *NnfNodeECDataSpec { + if in == nil { + return nil + } + out := new(NnfNodeECDataSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfNodeECDataStatus) DeepCopyInto(out *NnfNodeECDataStatus) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]NnfNodeECPrivateData, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make(NnfNodeECPrivateData, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataStatus. +func (in *NnfNodeECDataStatus) DeepCopy() *NnfNodeECDataStatus { + if in == nil { + return nil + } + out := new(NnfNodeECDataStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in NnfNodeECPrivateData) DeepCopyInto(out *NnfNodeECPrivateData) { + { + in := &in + *out = make(NnfNodeECPrivateData, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECPrivateData. +func (in NnfNodeECPrivateData) DeepCopy() NnfNodeECPrivateData { + if in == nil { + return nil + } + out := new(NnfNodeECPrivateData) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeList) DeepCopyInto(out *NnfNodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeList. +func (in *NnfNodeList) DeepCopy() *NnfNodeList { + if in == nil { + return nil + } + out := new(NnfNodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeSpec) DeepCopyInto(out *NnfNodeSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeSpec. +func (in *NnfNodeSpec) DeepCopy() *NnfNodeSpec { + if in == nil { + return nil + } + out := new(NnfNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStatus) DeepCopyInto(out *NnfNodeStatus) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]NnfServerStatus, len(*in)) + copy(*out, *in) + } + if in.Drives != nil { + in, out := &in.Drives, &out.Drives + *out = make([]NnfDriveStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStatus. +func (in *NnfNodeStatus) DeepCopy() *NnfNodeStatus { + if in == nil { + return nil + } + out := new(NnfNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *NnfNodeStorage) DeepCopyInto(out *NnfNodeStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorage. +func (in *NnfNodeStorage) DeepCopy() *NnfNodeStorage { + if in == nil { + return nil + } + out := new(NnfNodeStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorageAllocationStatus) DeepCopyInto(out *NnfNodeStorageAllocationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageAllocationStatus. +func (in *NnfNodeStorageAllocationStatus) DeepCopy() *NnfNodeStorageAllocationStatus { + if in == nil { + return nil + } + out := new(NnfNodeStorageAllocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorageList) DeepCopyInto(out *NnfNodeStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageList. +func (in *NnfNodeStorageList) DeepCopy() *NnfNodeStorageList { + if in == nil { + return nil + } + out := new(NnfNodeStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorageSpec) DeepCopyInto(out *NnfNodeStorageSpec) { + *out = *in + out.LustreStorage = in.LustreStorage + out.BlockReference = in.BlockReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageSpec. +func (in *NnfNodeStorageSpec) DeepCopy() *NnfNodeStorageSpec { + if in == nil { + return nil + } + out := new(NnfNodeStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorageStatus) DeepCopyInto(out *NnfNodeStorageStatus) { + *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfNodeStorageAllocationStatus, len(*in)) + copy(*out, *in) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageStatus. 
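
`NnfNodeECPrivateData` above shows the remaining case: a type that is itself a map gets a value-receiver copier, and a map of such maps is copied one level at a time while preserving `nil` entries, so a round-trip never turns `nil` into an empty map. A sketch with a hypothetical `PrivateData` type:

```go
package example

// PrivateData is a hypothetical named map type, mirroring how a
// map-based API type is copied with a value receiver.
type PrivateData map[string]string

func (in PrivateData) DeepCopy() PrivateData {
	if in == nil {
		return nil
	}
	out := make(PrivateData, len(in))
	for key, val := range in {
		out[key] = val // string values copy cleanly
	}
	return out
}

// deepCopyData copies a map of named maps one level at a time,
// keeping nil entries nil, as the generated code above does.
func deepCopyData(in map[string]PrivateData) map[string]PrivateData {
	if in == nil {
		return nil
	}
	out := make(map[string]PrivateData, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		out[key] = val.DeepCopy()
	}
	return out
}
```
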
+func (in *NnfNodeStorageStatus) DeepCopy() *NnfNodeStorageStatus { + if in == nil { + return nil + } + out := new(NnfNodeStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManager) DeepCopyInto(out *NnfPortManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManager. +func (in *NnfPortManager) DeepCopy() *NnfPortManager { + if in == nil { + return nil + } + out := new(NnfPortManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfPortManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerAllocationSpec) DeepCopyInto(out *NnfPortManagerAllocationSpec) { + *out = *in + out.Requester = in.Requester +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerAllocationSpec. +func (in *NnfPortManagerAllocationSpec) DeepCopy() *NnfPortManagerAllocationSpec { + if in == nil { + return nil + } + out := new(NnfPortManagerAllocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerAllocationStatus) DeepCopyInto(out *NnfPortManagerAllocationStatus) { + *out = *in + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(v1.ObjectReference) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]uint16, len(*in)) + copy(*out, *in) + } + if in.TimeUnallocated != nil { + in, out := &in.TimeUnallocated, &out.TimeUnallocated + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerAllocationStatus. +func (in *NnfPortManagerAllocationStatus) DeepCopy() *NnfPortManagerAllocationStatus { + if in == nil { + return nil + } + out := new(NnfPortManagerAllocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerList) DeepCopyInto(out *NnfPortManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfPortManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerList. +func (in *NnfPortManagerList) DeepCopy() *NnfPortManagerList { + if in == nil { + return nil + } + out := new(NnfPortManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NnfPortManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerSpec) DeepCopyInto(out *NnfPortManagerSpec) { + *out = *in + out.SystemConfiguration = in.SystemConfiguration + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfPortManagerAllocationSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerSpec. +func (in *NnfPortManagerSpec) DeepCopy() *NnfPortManagerSpec { + if in == nil { + return nil + } + out := new(NnfPortManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerStatus) DeepCopyInto(out *NnfPortManagerStatus) { + *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfPortManagerAllocationStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerStatus. +func (in *NnfPortManagerStatus) DeepCopy() *NnfPortManagerStatus { + if in == nil { + return nil + } + out := new(NnfPortManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfResourceStatus) DeepCopyInto(out *NnfResourceStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfResourceStatus. +func (in *NnfResourceStatus) DeepCopy() *NnfResourceStatus { + if in == nil { + return nil + } + out := new(NnfResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfServerStatus) DeepCopyInto(out *NnfServerStatus) { + *out = *in + out.NnfResourceStatus = in.NnfResourceStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfServerStatus. +func (in *NnfServerStatus) DeepCopy() *NnfServerStatus { + if in == nil { + return nil + } + out := new(NnfServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorage) DeepCopyInto(out *NnfStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorage. +func (in *NnfStorage) DeepCopy() *NnfStorage { + if in == nil { + return nil + } + out := new(NnfStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
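
`DeepCopyObject` is what ties each kind into client-go: it satisfies `runtime.Object`, and controllers rely on it (via `DeepCopy`) to avoid mutating objects shared with the informer cache. A usage sketch, assuming the `NnfStorage` kind from this package and a hypothetical label key:

```go
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
)

// updateStorageLabel shows the idiom the generated copiers exist for:
// objects returned by the cached client are shared, so they are deep
// copied before any mutation, and the copy is what gets written back.
func updateStorageLabel(ctx context.Context, c client.Client, key client.ObjectKey) error {
	storage := &nnfv1alpha1.NnfStorage{}
	if err := c.Get(ctx, key, storage); err != nil {
		return err
	}

	updated := storage.DeepCopy() // never mutate the cache's copy
	labels := updated.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	labels["example.com/touched"] = "true" // hypothetical label key
	updated.SetLabels(labels)

	return c.Update(ctx, updated)
}
```

The pre-mutation object is also the natural base for a patch, e.g. `c.Patch(ctx, updated, client.MergeFrom(storage))` in place of the update.
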
+func (in *NnfStorageAllocationNodes) DeepCopyInto(out *NnfStorageAllocationNodes) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationNodes. +func (in *NnfStorageAllocationNodes) DeepCopy() *NnfStorageAllocationNodes { + if in == nil { + return nil + } + out := new(NnfStorageAllocationNodes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageAllocationSetSpec) DeepCopyInto(out *NnfStorageAllocationSetSpec) { + *out = *in + out.NnfStorageLustreSpec = in.NnfStorageLustreSpec + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]NnfStorageAllocationNodes, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationSetSpec. +func (in *NnfStorageAllocationSetSpec) DeepCopy() *NnfStorageAllocationSetSpec { + if in == nil { + return nil + } + out := new(NnfStorageAllocationSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageAllocationSetStatus) DeepCopyInto(out *NnfStorageAllocationSetStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationSetStatus. +func (in *NnfStorageAllocationSetStatus) DeepCopy() *NnfStorageAllocationSetStatus { + if in == nil { + return nil + } + out := new(NnfStorageAllocationSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageList) DeepCopyInto(out *NnfStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageList. +func (in *NnfStorageList) DeepCopy() *NnfStorageList { + if in == nil { + return nil + } + out := new(NnfStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageLustreSpec) DeepCopyInto(out *NnfStorageLustreSpec) { + *out = *in + out.PersistentMgsReference = in.PersistentMgsReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageLustreSpec. +func (in *NnfStorageLustreSpec) DeepCopy() *NnfStorageLustreSpec { + if in == nil { + return nil + } + out := new(NnfStorageLustreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfStorageLustreStatus) DeepCopyInto(out *NnfStorageLustreStatus) { + *out = *in + out.LustreMgtReference = in.LustreMgtReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageLustreStatus. +func (in *NnfStorageLustreStatus) DeepCopy() *NnfStorageLustreStatus { + if in == nil { + return nil + } + out := new(NnfStorageLustreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfile) DeepCopyInto(out *NnfStorageProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfile. +func (in *NnfStorageProfile) DeepCopy() *NnfStorageProfile { + if in == nil { + return nil + } + out := new(NnfStorageProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfStorageProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileCmdLines) DeepCopyInto(out *NnfStorageProfileCmdLines) { + *out = *in + out.VgChange = in.VgChange + out.LvChange = in.LvChange +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileCmdLines. +func (in *NnfStorageProfileCmdLines) DeepCopy() *NnfStorageProfileCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileCmdLines) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileData) DeepCopyInto(out *NnfStorageProfileData) { + *out = *in + in.LustreStorage.DeepCopyInto(&out.LustreStorage) + in.GFS2Storage.DeepCopyInto(&out.GFS2Storage) + in.XFSStorage.DeepCopyInto(&out.XFSStorage) + in.RawStorage.DeepCopyInto(&out.RawStorage) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileData. +func (in *NnfStorageProfileData) DeepCopy() *NnfStorageProfileData { + if in == nil { + return nil + } + out := new(NnfStorageProfileData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileGFS2Data) DeepCopyInto(out *NnfStorageProfileGFS2Data) { + *out = *in + out.CmdLines = in.CmdLines + if in.StorageLabels != nil { + in, out := &in.StorageLabels, &out.StorageLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileGFS2Data. +func (in *NnfStorageProfileGFS2Data) DeepCopy() *NnfStorageProfileGFS2Data { + if in == nil { + return nil + } + out := new(NnfStorageProfileGFS2Data) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
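
`NnfStorageProfileData` above also shows how the generator chooses between a plain assignment and a nested call: value-only structs such as the command-line types are assigned directly, while anything transitively holding a slice, map, or pointer gets its own `DeepCopyInto`. A sketch with hypothetical profile types:

```go
package example

// CmdLines and Options are hypothetical nested types: one value-only,
// one holding a slice.
type CmdLines struct{ Mkfs string }

type Options struct{ StorageLabels []string }

func (in *Options) DeepCopyInto(out *Options) {
	*out = *in
	if in.StorageLabels != nil {
		out.StorageLabels = make([]string, len(in.StorageLabels))
		copy(out.StorageLabels, in.StorageLabels)
	}
}

type ProfileData struct {
	Cmds CmdLines // no references: plain assignment is already deep
	Opts Options  // holds a slice: needs its own DeepCopyInto
}

// DeepCopyInto mirrors the generated rule: the field's type decides
// whether an assignment or a nested DeepCopyInto call is emitted.
func (in *ProfileData) DeepCopyInto(out *ProfileData) {
	*out = *in
	out.Cmds = in.Cmds // explicit, as in the generated code
	in.Opts.DeepCopyInto(&out.Opts)
}
```
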
+func (in *NnfStorageProfileLVMLvChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMLvChangeCmdLines) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLVMLvChangeCmdLines. +func (in *NnfStorageProfileLVMLvChangeCmdLines) DeepCopy() *NnfStorageProfileLVMLvChangeCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLVMLvChangeCmdLines) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMVgChangeCmdLines) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLVMVgChangeCmdLines. +func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopy() *NnfStorageProfileLVMVgChangeCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLVMVgChangeCmdLines) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileList) DeepCopyInto(out *NnfStorageProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfStorageProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileList. +func (in *NnfStorageProfileList) DeepCopy() *NnfStorageProfileList { + if in == nil { + return nil + } + out := new(NnfStorageProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfStorageProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLustreCmdLines) DeepCopyInto(out *NnfStorageProfileLustreCmdLines) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreCmdLines. +func (in *NnfStorageProfileLustreCmdLines) DeepCopy() *NnfStorageProfileLustreCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLustreCmdLines) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLustreData) DeepCopyInto(out *NnfStorageProfileLustreData) { + *out = *in + out.MgtCmdLines = in.MgtCmdLines + out.MdtCmdLines = in.MdtCmdLines + out.MgtMdtCmdLines = in.MgtMdtCmdLines + out.OstCmdLines = in.OstCmdLines + in.MgtOptions.DeepCopyInto(&out.MgtOptions) + in.MdtOptions.DeepCopyInto(&out.MdtOptions) + in.MgtMdtOptions.DeepCopyInto(&out.MgtMdtOptions) + in.OstOptions.DeepCopyInto(&out.OstOptions) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreData. 
+func (in *NnfStorageProfileLustreData) DeepCopy() *NnfStorageProfileLustreData { + if in == nil { + return nil + } + out := new(NnfStorageProfileLustreData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLustreMiscOptions) DeepCopyInto(out *NnfStorageProfileLustreMiscOptions) { + *out = *in + if in.StorageLabels != nil { + in, out := &in.StorageLabels, &out.StorageLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreMiscOptions. +func (in *NnfStorageProfileLustreMiscOptions) DeepCopy() *NnfStorageProfileLustreMiscOptions { + if in == nil { + return nil + } + out := new(NnfStorageProfileLustreMiscOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileRawData) DeepCopyInto(out *NnfStorageProfileRawData) { + *out = *in + out.CmdLines = in.CmdLines + if in.StorageLabels != nil { + in, out := &in.StorageLabels, &out.StorageLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileRawData. +func (in *NnfStorageProfileRawData) DeepCopy() *NnfStorageProfileRawData { + if in == nil { + return nil + } + out := new(NnfStorageProfileRawData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileXFSData) DeepCopyInto(out *NnfStorageProfileXFSData) { + *out = *in + out.CmdLines = in.CmdLines + if in.StorageLabels != nil { + in, out := &in.StorageLabels, &out.StorageLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileXFSData. +func (in *NnfStorageProfileXFSData) DeepCopy() *NnfStorageProfileXFSData { + if in == nil { + return nil + } + out := new(NnfStorageProfileXFSData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageSpec) DeepCopyInto(out *NnfStorageSpec) { + *out = *in + if in.AllocationSets != nil { + in, out := &in.AllocationSets, &out.AllocationSets + *out = make([]NnfStorageAllocationSetSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageSpec. +func (in *NnfStorageSpec) DeepCopy() *NnfStorageSpec { + if in == nil { + return nil + } + out := new(NnfStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageStatus) DeepCopyInto(out *NnfStorageStatus) { + *out = *in + out.NnfStorageLustreStatus = in.NnfStorageLustreStatus + if in.AllocationSets != nil { + in, out := &in.AllocationSets, &out.AllocationSets + *out = make([]NnfStorageAllocationSetStatus, len(*in)) + copy(*out, *in) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageStatus. 
+func (in *NnfStorageStatus) DeepCopy() *NnfStorageStatus { + if in == nil { + return nil + } + out := new(NnfStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfSystemStorage) DeepCopyInto(out *NnfSystemStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorage. +func (in *NnfSystemStorage) DeepCopy() *NnfSystemStorage { + if in == nil { + return nil + } + out := new(NnfSystemStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfSystemStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfSystemStorageList) DeepCopyInto(out *NnfSystemStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfSystemStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageList. +func (in *NnfSystemStorageList) DeepCopy() *NnfSystemStorageList { + if in == nil { + return nil + } + out := new(NnfSystemStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfSystemStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfSystemStorageSpec) DeepCopyInto(out *NnfSystemStorageSpec) { + *out = *in + out.SystemConfiguration = in.SystemConfiguration + if in.ExcludeRabbits != nil { + in, out := &in.ExcludeRabbits, &out.ExcludeRabbits + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludeRabbits != nil { + in, out := &in.IncludeRabbits, &out.IncludeRabbits + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludeComputes != nil { + in, out := &in.ExcludeComputes, &out.ExcludeComputes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludeComputes != nil { + in, out := &in.IncludeComputes, &out.IncludeComputes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ComputesPattern != nil { + in, out := &in.ComputesPattern, &out.ComputesPattern + *out = make([]int, len(*in)) + copy(*out, *in) + } + out.StorageProfile = in.StorageProfile +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageSpec. +func (in *NnfSystemStorageSpec) DeepCopy() *NnfSystemStorageSpec { + if in == nil { + return nil + } + out := new(NnfSystemStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
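
Before any v1alpha2 object can be served or converted, the new group version must be registered with the manager's scheme, which the `cmd/main.go` hunk below does via `AddToScheme`; the same file gates every `SetupWebhookWithManager` call behind `ENABLE_WEBHOOKS != "false"` so test environments can opt out. A condensed sketch of the scheme wiring, assuming the import paths used elsewhere in this diff:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
)

var scheme = runtime.NewScheme()

// Both API versions stay registered: conversion requires the scheme to
// decode v1alpha1 objects as well as v1alpha2 ones.
func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(nnfv1alpha1.AddToScheme(scheme))
	utilruntime.Must(nnfv1alpha2.AddToScheme(scheme))
}
```

The repeated per-kind guard blocks below could equally be collapsed into a loop over setup functions; keeping them flat matches the kubebuilder scaffolding and keeps the `//+kubebuilder:scaffold:builder` marker usable.
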
+func (in *NnfSystemStorageStatus) DeepCopyInto(out *NnfSystemStorageStatus) { + *out = *in + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageStatus. +func (in *NnfSystemStorageStatus) DeepCopy() *NnfSystemStorageStatus { + if in == nil { + return nil + } + out := new(NnfSystemStorageStatus) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/main.go b/cmd/main.go index 1f4e5c4a0..cada51249 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -39,6 +39,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/healthz" zapcr "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -53,6 +54,7 @@ import ( controllers "github.com/NearNodeFlash/nnf-sos/internal/controller" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" //+kubebuilder:scaffold:imports nnf "github.com/NearNodeFlash/nnf-ec/pkg" @@ -76,6 +78,7 @@ func init() { utilruntime.Must(lusv1beta1.AddToScheme(scheme)) utilruntime.Must(mpiv2beta1.AddToScheme(scheme)) + utilruntime.Must(nnfv1alpha2.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -150,8 +153,6 @@ func main() { os.Exit(1) } - //+kubebuilder:scaffold:builder - if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") os.Exit(1) @@ -216,8 +217,10 @@ func (c *nodeLocalController) SetupReconcilers(mgr manager.Manager, opts *nnf.Op Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("NnfNode"), Scheme: mgr.GetScheme(), + Events: make(chan event.GenericEvent), SemaphoreForDone: semNnfNodeDone, NamespacedName: types.NamespacedName{Name: controllers.NnfNlcResourceName, Namespace: os.Getenv("NNF_NODE_NAME")}, + Options: opts, }).SetupWithManager(mgr); err != nil { return err } @@ -240,8 +243,10 @@ func (c *nodeLocalController) SetupReconcilers(mgr manager.Manager, opts *nnf.Op Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("NnfNodeBlockStorage"), Scheme: mgr.GetScheme(), + Events: make(chan event.GenericEvent), SemaphoreForStart: semNnfNodeECDone, SemaphoreForDone: semNnfNodeBlockStorageDone, + Options: opts, }).SetupWithManager(mgr); err != nil { return err } @@ -367,26 +372,96 @@ func (c *storageController) SetupReconcilers(mgr manager.Manager, opts *nnf.Opti return err } + var err error + if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err := (&nnfv1alpha1.NnfStorageProfile{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha2.NnfStorageProfile{}).SetupWebhookWithManager(mgr); err != nil { ctrl.Log.Error(err, "unable to create webhook", "webhook", "NnfStorageProfile") return err } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err := (&nnfv1alpha1.NnfContainerProfile{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha2.NnfContainerProfile{}).SetupWebhookWithManager(mgr); err != nil { ctrl.Log.Error(err, "unable to create webhook", "webhook", "NnfContainerProfile") return err } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err := (&nnfv1alpha1.NnfDataMovementProfile{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha2.NnfDataMovementProfile{}).SetupWebhookWithManager(mgr); err != nil { ctrl.Log.Error(err, "unable to create webhook", "webhook", "NnfDataMovementProfile") return err 
} } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfAccess{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfAccess") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfDataMovement{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfDataMovement") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfDataMovementManager{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfDataMovementManager") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfLustreMGT{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfLustreMGT") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfNode{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfNode") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfNodeBlockStorage{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfNodeBlockStorage") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfNodeECData{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfNodeECData") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfNodeStorage{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfNodeStorage") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfPortManager{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfPortManager") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfStorage{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfStorage") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&nnfv1alpha2.NnfSystemStorage{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "NnfSystemStorage") + os.Exit(1) + } + } + //+kubebuilder:scaffold:builder + return nil } diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml index 4059f6fdd..54304c8c8 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml @@ -261,6 +261,256 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The desired state + jsonPath: .spec.desiredState + name: DESIREDSTATE + type: string + - description: The current state + jsonPath: .status.state + name: STATE + type: string + - description: Whether the state has been achieved + jsonPath: .status.ready + name: READY + type: boolean + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfAccess is the Schema for the 
nnfaccesses API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfAccessSpec defines the desired state of NnfAccess + properties: + clientReference: + description: |- + ClientReference is for a client resource. (DWS) Computes is the only client + resource type currently supported + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + desiredState: + description: DesiredState is the desired state for the mounts on the + client + enum: + - mounted + - unmounted + type: string + groupID: + description: GroupID for the new mount. Currently only used for raw + format: int32 + type: integer + makeClientMounts: + default: true + description: |- + MakeClientMounts determines whether the ClientMount resources are made, or if only + the access list on the NnfNodeBlockStorage is updated + type: boolean + mountPath: + description: MountPath for the storage target on the client + type: string + mountPathPrefix: + type: string + storageReference: + description: StorageReference is the NnfStorage reference + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + target: + description: |- + Target specifies which storage targets the client should mount + - single: Only one of the storage the client can access + - all: All of the storage the client can access + - shared: Multiple clients access the same storage + enum: + - single + - all + - shared + type: string + teardownState: + allOf: + - enum: + - Proposal + - Setup + - DataIn + - PreRun + - PostRun + - DataOut + - Teardown + - enum: + - PreRun + - PostRun + - Teardown + description: |- + TeardownState is the desired state of the workflow for this NNF Access resource to + be torn down and deleted. + type: string + userID: + description: UserID for the new mount. Currently only used for raw + format: int32 + type: integer + required: + - desiredState + - groupID + - makeClientMounts + - storageReference + - target + - teardownState + - userID + type: object + status: + description: NnfAccessStatus defines the observed state of NnfAccess + properties: + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. 
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + description: Ready signifies whether status.state has been achieved + type: boolean + state: + description: State is the current state + enum: + - mounted + - unmounted + type: string + required: + - ready + - state + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml index 5ab727d5e..5bd634437 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml @@ -14858,4 +14858,14849 @@ spec: - data type: object served: true + storage: false + - name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfContainerProfile is the Schema for the nnfcontainerprofiles + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + data: + description: NnfContainerProfileSpec defines the desired state of NnfContainerProfile + properties: + groupID: + description: |- + GroupID specifies the group ID that is allowed to use this profile. If this is specified, + only Workflows that have a matching group ID can select this profile. + format: int32 + type: integer + mpiSpec: + description: |- + MPIJobSpec to define the MPI containers created from this profile. This functionality is + provided via mpi-operator, a 3rd party tool to assist in running MPI applications across + worker containers. + Either this or Spec must be provided, but not both. + + + All the fields defined drive mpi-operator behavior. See the type definition of MPISpec for + more detail: + https://github.com/kubeflow/mpi-operator/blob/v0.4.0/pkg/apis/kubeflow/v2beta1/types.go#L137 + + + Note: most of these fields are fully customizable with a few exceptions. These fields are + overridden by NNF software to ensure proper behavior to interface with the DWS workflow + - Replicas + - RunPolicy.BackoffLimit (this is set above by `RetryLimit`) + - Worker/Launcher.RestartPolicy + properties: + mpiImplementation: + default: OpenMPI + description: |- + MPIImplementation is the MPI implementation. + Options are "OpenMPI" (default) and "Intel". + enum: + - OpenMPI + - Intel + type: string + mpiReplicaSpecs: + additionalProperties: + description: ReplicaSpec is a description of the replica + properties: + replicas: + description: |- + Replicas is the desired number of replicas of the given template. + If unspecified, defaults to 1. + format: int32 + type: integer + restartPolicy: + description: |- + Restart policy for all replicas within the job. + One of Always, OnFailure, Never and ExitCode. + Default to Never. + type: string + template: + description: |- + Template is the object that describes the pod that + will be created for this replica. 
RestartPolicy in PodTemplateSpec + will be overide by RestartPolicy in ReplicaSpec + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling + constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, + associated with the corresponding + weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the + same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates + whether a service account token should be automatically + mounted. 
+ type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that + you want to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment + variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if + value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a + ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether + the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a + secret in the pod's namespace + properties: + key: + description: The key of the + secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether + the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the + source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select + from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier + to prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. 
+ properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a + network port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the + external port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. 
+ Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the + container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. 
If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of + block devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path + inside of the container that the device + will be mapped to. + type: string + name: + description: name must match the name + of a persistentVolumeClaim in the + pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name + of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. 
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS + resolver options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment + variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if + value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a + ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether + the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a + secret in the pod's namespace + properties: + key: + description: The key of the + secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether + the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the + source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select + from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier + to prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for + ephemeral containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). 
Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. 
+ format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. 
The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral + containers. + items: + description: ContainerPort represents a + network port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the + external port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the + container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. 
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of + block devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path + inside of the container that the device + will be mapped to. + type: string + name: + description: name must match the name + of a persistentVolumeClaim in the + pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. 
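`targetContainerName` is the field that distinguishes an ephemeral container from a regular one: it names the existing container whose namespaces the debug container joins. A rough sketch of the pod field as something like `kubectl debug <pod> --image=busybox --target=app` would populate it, with hypothetical names throughout:

```yaml
spec:
  ephemeralContainers:
    - name: debugger
      image: busybox
      command: ["sh"]
      stdin: true
      tty: true
      targetContainerName: app   # join the namespaces (IPC, PID, etc.) of container "app"
```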
+ properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name + of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. This is only valid for non-hostNetwork pods. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. 
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that + you want to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment + variable. Must be a C_IDENTIFIER. 
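The init-container scheduling rule quoted above is easiest to see with numbers: the pod's effective request per resource is the larger of the biggest init-container request and the sum of the regular containers' requests. A sketch with placeholder names and images, where the effective CPU request is max(200m, 100m + 150m) = 250m:

```yaml
spec:
  initContainers:
    - name: migrate
      image: registry.example.com/migrate:latest   # placeholder
      resources:
        requests:
          cpu: 200m        # largest single init request
  containers:
    - name: web
      image: registry.example.com/web:latest       # placeholder
      resources:
        requests:
          cpu: 100m
    - name: sidecar
      image: registry.example.com/sidecar:latest   # placeholder
      resources:
        requests:
          cpu: 150m        # regular containers sum to 250m, which wins here
```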
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if + value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a + ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether + the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a + secret in the pod's namespace + properties: + key: + description: The key of the + secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether + the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the + source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select + from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier + to prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. 
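Since `preStop` runs before SIGTERM and counts against the pod's termination grace period, a common sketch is a short exec sleep that lets load balancers drain connections first; the sleep length here is arbitrary and must fit within `terminationGracePeriodSeconds`:

```yaml
lifecycle:
  preStop:
    exec:
      command: ["sh", "-c", "sleep 10"]   # give endpoints time to deprogram before shutdown
```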
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. 
+ Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a + network port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the + external port to. 
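Putting the liveness-probe knobs above together: the kubelet restarts the container only after `failureThreshold` consecutive failures, so the reaction time is roughly `failureThreshold x periodSeconds`. A container-level sketch, assuming a hypothetical `/healthz` endpoint on port 8080 (3 x 10s, so up to about 30s to react):

```yaml
livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
  initialDelaySeconds: 5
  periodSeconds: 10
  failureThreshold: 3
  timeoutSeconds: 1
```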
+ type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
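The `x-kubernetes-list-map-keys` entry above means port list items merge on the `containerPort` + `protocol` pair, and the int-or-string `port` field in probes can reference a named port. A small sketch of both, with a hypothetical `/ready` endpoint:

```yaml
ports:
  - name: http              # must be an IANA_SVC_NAME, unique within the pod
    containerPort: 8080
    protocol: TCP
readinessProbe:
  httpGet:
    path: /ready
    port: http              # string form resolves to the named port above
```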
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the + container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
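The long `restartPolicy` description above is the "sidecar" init-container pattern: an init container with `restartPolicy: Always` keeps running alongside the regular containers instead of blocking them. A hedged sketch with placeholder names and images (this behavior depends on the SidecarContainers feature gate in the Kubernetes versions this schema targets):

```yaml
spec:
  initContainers:
    - name: log-shipper
      image: registry.example.com/shipper:latest   # placeholder
      restartPolicy: Always   # runs for the pod's lifetime; later init containers don't wait for it to exit
  containers:
    - name: app
      image: registry.example.com/app:latest       # placeholder
```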
+ properties: + add: + description: Added capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. 
Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. 
The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of + block devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path + inside of the container that the device + will be mapped to. + type: string + name: + description: name must match the name + of a persistentVolumeClaim in the + pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. 
+ type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name + of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: |- + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + the scheduler simply schedules this pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. 
+ Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference + to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition + in the pod's condition list with matching + type. + type: string + required: + - conditionType + type: object + type: array + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. 
+ Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find + the ResourceClaim. + properties: + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + + + This is a beta feature enabled by the PodSchedulingReadiness feature gate. + items: + description: PodSchedulingGate is associated to + a Pod to guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. 
The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter + to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod.
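For orientation, the pod-level securityContext fields documented above compose as in this sketch; all numeric IDs are illustrative only:

```yaml
# Sketch of a pod-level securityContext using fields from the schema above.
securityContext:
  runAsUser: 1000                  # UID for container entrypoints
  runAsGroup: 3000
  runAsNonRoot: true               # kubelet rejects images that resolve to UID 0
  fsGroup: 2000                    # volume ownership; setgid bit set on new files
  fsGroupChangePolicy: OnRootMismatch
  supplementalGroups: [4000]
  seccompProfile:
    type: RuntimeDefault           # Localhost would also require localhostProfile
```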
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string.
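A toleration entry assembled from the fields above might look like the following sketch; the dedicated taint key and value are hypothetical:

```yaml
# Illustrative tolerations matching the <key,value,effect> triple semantics above.
tolerations:
  - key: example.com/dedicated       # hypothetical taint key
    operator: Equal
    value: nnf
    effect: NoSchedule
  - key: node.kubernetes.io/not-ready
    operator: Exists                 # wildcard for value
    effect: NoExecute
    tolerationSeconds: 300           # tolerate the taint for 300s before eviction
```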
+ type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies + how to spread matching pods among the given + topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume + in a pod that may be accessed by any container + in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure + Data Disk mount on the host and bind mount + to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host + Caching mode: None, Read Only, Read + Write.' 
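Stepping back to the topologySpreadConstraints schema that closed above, a minimal sketch of one constraint using the maxSkew/topologyKey/whenUnsatisfiable fields it describes; the app label is hypothetical:

```yaml
# One spread constraint; each node is a separate domain for kubernetes.io/hostname.
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: kubernetes.io/hostname
    whenUnsatisfiable: ScheduleAnyway   # prefer, but do not require, balance
    labelSelector:
      matchLabels:
        app: example-app                # hypothetical pod label
```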
+ type: string + diskName: + description: diskName is the Name of the + data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data + disk in the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are + Shared: multiple blob disks per storage + account Dedicated: single blob disk + per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure + File Service mount on the host and bind + mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of + secret that contains Azure Storage Account + Name and Key + type: string + shareName: + description: shareName is the azure share + Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount + on the host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as + the mounted root, rather than the full + Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap + that should populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) + represents ephemeral storage that is handled + by certain external CSI drivers (Beta feature). 
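As a usage sketch for the configMap volume source documented above — the ConfigMap name and keys are hypothetical — note that defaultMode/mode accept either octal or decimal forms in YAML:

```yaml
# Illustrative configMap volume projecting one key to a relative path.
volumes:
  - name: config
    configMap:
      name: example-config          # hypothetical ConfigMap in the pod's namespace
      defaultMode: 0644             # octal; 420 in decimal
      optional: false               # pod fails to start if the ConfigMap is missing
      items:
        - key: settings.conf
          path: etc/settings.conf   # relative; may not start with '..'
          mode: 0600
```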
+ properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward + API about the pod that should populate this + volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward + API volume file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded.
The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. 
The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type + of resource being referenced + type: string + name: + description: Name is the name + of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type + of resource being referenced + type: string + name: + description: Name is the name + of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim + references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label + query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the + binding reference to the PersistentVolume + backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel + resource that is attached to a kubelet's + host machine and then exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target + lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
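Returning to the ephemeral volume schema that closed above, its volumeClaimTemplate can be read as the following sketch; the storage class name is an assumption:

```yaml
# Generic ephemeral volume: the PVC <pod name>-scratch is created with the pod
# and deleted with it.
volumes:
  - name: scratch
    ephemeral:
      volumeClaimTemplate:
        metadata:
          labels:
            app: example-app             # copied into the generated PVC
        spec:
          accessModes: ["ReadWriteOnce"]
          storageClassName: example-sc   # hypothetical StorageClass
          resources:
            requests:
              storage: 10Gi
```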
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: + FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the + driver to use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this + field holds extra command options if + any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker + volume attached to a kubelet's host machine. + This depends on the Flocker control service + being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of + the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. 
Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash + for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. 
+ More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines + whether to support iSCSI Discovery CHAP + authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether + to support iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified + Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target + Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret + for iSCSI target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents + a PhotonController persistent disk attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies + Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx + volume attached and mounted on kubelets + host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies + a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one + resources secrets, configmaps, and downward + API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume + projections + items: + description: Projection that may be + projected along with other supported + volume types + properties: + configMap: + description: configMap information + about the configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string + key to a path within a volume. + properties: + key: + description: key is the + key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify + whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information + about the downwardAPI data to + project + properties: + items: + description: Items is a list + of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile + represents information to + create the file containing + the pod field + properties: + fieldRef: + description: 'Required: + Selects a field of the + pod: only annotations, + labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version + of the schema the + FieldPath is written + in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path + of the field to + select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: + Path is the relative + path name of the file + to be created. Must + not be absolute or contain + the ''..'' path. Must + be utf-8 encoded. The + first item of the relative + path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container + name: required for + volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies + the output format + of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information + about the secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string + key to a path within a volume. + properties: + key: + description: key is the + key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field + specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken + is information about the serviceAccountToken + data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes.
+ format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte + mount on the host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serviceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by + name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO + persistent volume attached and mounted on + Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address + of the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name + of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable + SSL communication with Gateway, default + false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO + Storage Pool associated with the protection + domain. + type: string + system: + description: system is the name of the + storage system as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether + the Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS + volume attached and mounted on Kubernetes + nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere + volume attached and mounted on kubelets + host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage + Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the + storage Policy Based Management (SPBM) + profile name. 
+ type: string + volumePath: + description: volumePath is the path that + identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + type: object + description: |- + MPIReplicaSpecs contains maps from `MPIReplicaType` to `ReplicaSpec` that + specify the MPI replicas to run. + type: object + runPolicy: + description: RunPolicy encapsulates various runtime policies of + the job. + properties: + activeDeadlineSeconds: + description: |- + Specifies the duration in seconds relative to the startTime that the job may be active + before the system tries to terminate it; value must be a positive integer. + format: int64 + type: integer + backoffLimit: + description: Optional number of retries before marking this + job failed. + format: int32 + type: integer + cleanPodPolicy: + description: |- + CleanPodPolicy defines the policy to kill pods after the job completes. + Defaults to Running. + type: string + schedulingPolicy: + description: SchedulingPolicy defines the policy related to + scheduling, e.g. gang-scheduling + properties: + minAvailable: + description: |- + MinAvailable defines the minimal number of members to run the PodGroup. + If the gang-scheduling is set to the volcano, + input is passed to `.spec.minMember` in PodGroup for the volcano. + When using this field, you need to make sure the application supports resizing (e.g., Elastic Horovod). + + + If not set, it defaults to the number of workers. + format: int32 + type: integer + minResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + MinResources defines the minimal resources of members to run the PodGroup. + If the gang-scheduling is set to the volcano, + input is passed to `.spec.minResources` in PodGroup for volcano. + type: object + priorityClass: + description: |- + PriorityClass defines the PodGroup's PriorityClass. + If the gang-scheduling is set to the volcano, + input is passed to `.spec.priorityClassName` in PodGroup for volcano. + type: string + queue: + description: |- + Queue defines the queue name to allocate resource for PodGroup. + If the gang-scheduling is set to the volcano, + input is passed to `.spec.queue` in PodGroup for the volcano. + type: string + scheduleTimeoutSeconds: + description: |- + SchedulerTimeoutSeconds defines the maximal time of members to wait before running the PodGroup. + Currently, this parameter isn't respected in any case. + TODO (tenzen-y): Modify comments when supporting scheduler-plugins. + format: int32 + type: integer + type: object + suspend: + default: false + description: |- + suspend specifies whether the MPIJob controller should create Pods or not. + If an MPIJob is created with suspend set to true, no Pods are created by + the MPIJob controller. If an MPIJob is suspended after creation (i.e. the + flag goes from false to true), the MPIJob controller will delete all + active Pods and PodGroups associated with this MPIJob. Also, it will suspend the + Launcher Job. Users must design their workload to gracefully handle this. + Suspending a Job will reset the StartTime field of the MPIJob. + + + Defaults to false. + type: boolean + ttlSecondsAfterFinished: + description: |- + TTLSecondsAfterFinished is the TTL to clean up jobs.
+ It may take extra ReconcilePeriod seconds for the cleanup, since + reconcile gets called periodically. + Defaults to infinite. + format: int32 + type: integer + type: object + slotsPerWorker: + default: 1 + description: |- + Specifies the number of slots per worker used in hostfile. + Defaults to 1. + format: int32 + type: integer + sshAuthMountPath: + default: /root/.ssh + description: |- + SSHAuthMountPath is the directory where SSH keys are mounted. + Defaults to "/root/.ssh". + type: string + required: + - mpiReplicaSpecs + type: object + numPorts: + description: |- + Number of ports to open for communication with the user container. These ports are opened on + the targeted NNF nodes and can be accessed outside of the k8s cluster (e.g. compute nodes). + The requested ports are made available as environment variables inside the container and in + the DWS workflow (NNF_CONTAINER_PORTS). + format: int32 + type: integer + pinned: + default: false + description: Pinned is true if this instance is an immutable copy + type: boolean + postRunTimeoutSeconds: + default: 300 + description: |- + Containers are expected to complete in the PostRun State. Allow this many seconds for the + containers to exit before declaring an error to the workflow. + Defaults to 300 if not set. A value of 0 disables this behavior. + format: int64 + minimum: 0 + type: integer + preRunTimeoutSeconds: + default: 300 + description: |- + Containers are launched in the PreRun state. Allow this many seconds for the containers to + start before declaring an error to the workflow. + Defaults to 300 if not set. A value of 0 disables this behavior. + format: int64 + minimum: 0 + type: integer + retryLimit: + default: 6 + description: |- + Specifies the number of times a container will be retried upon a failure. A new pod is + deployed on each retry. Defaults to 6 by kubernetes itself and must be set. A value of 0 + disables retries. + format: int32 + minimum: 0 + type: integer + spec: + description: |- + Spec to define the containers created from this profile. This is used for non-MPI containers. + Refer to the K8s documentation for `PodSpec` for more definition: + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec + Either this or MPISpec must be provided, but not both. + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred.
+ items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values.
+ properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to.
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty.
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether a + service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. 
+ items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. 
+ items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. 
+ type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. 
+ type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
+ Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+                                        type: string
+                                      required:
+                                      - port
+                                      type: object
+                                    tcpSocket:
+                                      description: |-
+                                        Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+                                        for backward compatibility. There is no validation of this field and
+                                        lifecycle hooks will fail at runtime when a tcp handler is specified.
+                                      properties:
+                                        host:
+                                          description: 'Optional: Host name to connect
+                                            to, defaults to the pod IP.'
+                                          type: string
+                                        port:
+                                          anyOf:
+                                          - type: integer
+                                          - type: string
+                                          description: |-
+                                            Number or name of the port to access on the container.
+                                            Number must be in the range 1 to 65535.
+                                            Name must be an IANA_SVC_NAME.
+                                          x-kubernetes-int-or-string: true
+                                      required:
+                                      - port
+                                      type: object
+                                  type: object
+                                preStop:
+                                  description: |-
+                                    PreStop is called immediately before a container is terminated due to an
+                                    API request or management event such as liveness/startup probe failure,
+                                    preemption, resource contention, etc. The handler is not called if the
+                                    container crashes or exits. The Pod's termination grace period countdown begins before the
+                                    PreStop hook is executed. Regardless of the outcome of the handler, the
+                                    container will eventually terminate within the Pod's termination grace
+                                    period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+                                    or until the termination grace period is reached.
+                                    More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+                                  properties:
+                                    exec:
+                                      description: Exec specifies the action to take.
+                                      properties:
+                                        command:
+                                          description: |-
+                                            Command is the command line to execute inside the container, the working directory for the
+                                            command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+                                            not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+                                            a shell, you need to explicitly call out to that shell.
+                                            Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+                                          items:
+                                            type: string
+                                          type: array
+                                      type: object
+                                    httpGet:
+                                      description: HTTPGet specifies the http request
+                                        to perform.
+                                      properties:
+                                        host:
+                                          description: |-
+                                            Host name to connect to, defaults to the pod IP. You probably want to set
+                                            "Host" in httpHeaders instead.
+                                          type: string
+                                        httpHeaders:
+                                          description: Custom headers to set in the request.
+                                            HTTP allows repeated headers.
+                                          items:
+                                            description: HTTPHeader describes a custom
+                                              header to be used in HTTP probes
+                                            properties:
+                                              name:
+                                                description: |-
+                                                  The header field name.
+                                                  This will be canonicalized upon output, so case-variant names will be understood as the same header.
+                                                type: string
+                                              value:
+                                                description: The header field value
+                                                type: string
+                                            required:
+                                            - name
+                                            - value
+                                            type: object
+                                          type: array
+                                        path:
+                                          description: Path to access on the HTTP server.
+                                          type: string
+                                        port:
+                                          anyOf:
+                                          - type: integer
+                                          - type: string
+                                          description: |-
+                                            Name or number of the port to access on the container.
+                                            Number must be in the range 1 to 65535.
+                                            Name must be an IANA_SVC_NAME.
+                                          x-kubernetes-int-or-string: true
+                                        scheme:
+                                          description: |-
+                                            Scheme to use for connecting to the host.
+                                            Defaults to HTTP.
+                                          type: string
+                                      required:
+                                      - port
+                                      type: object
+                                    tcpSocket:
+                                      description: |-
+                                        Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+                                        for backward compatibility. There is no validation of this field and
+                                        lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+                                    More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                                  type: object
+                              type: object
+                            restartPolicy:
+                              description: |-
+                                Restart policy for the container to manage the restart behavior of each
+                                container within a pod.
+                                This may only be set for init containers. You cannot set this field on
+                                ephemeral containers.
+                              type: string
+                            securityContext:
+                              description: |-
+                                Optional: SecurityContext defines the security options the ephemeral container should be run with.
+                                If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+                              properties:
+                                allowPrivilegeEscalation:
+                                  description: |-
+                                    AllowPrivilegeEscalation controls whether a process can gain more
+                                    privileges than its parent process. This bool directly controls if
+                                    the no_new_privs flag will be set on the container process.
+                                    AllowPrivilegeEscalation is always true when the container is:
+                                    1) run as Privileged
+                                    2) has CAP_SYS_ADMIN
+                                    Note that this field cannot be set when spec.os.name is windows.
+                                  type: boolean
+                                capabilities:
+                                  description: |-
+                                    The capabilities to add/drop when running containers.
+                                    Defaults to the default set of capabilities granted by the container runtime.
+                                    Note that this field cannot be set when spec.os.name is windows.
+                                  properties:
+                                    add:
+                                      description: Added capabilities
+                                      items:
+                                        description: Capability represents POSIX capabilities
+                                          type
+                                        type: string
+                                      type: array
+                                    drop:
+                                      description: Removed capabilities
+                                      items:
+                                        description: Capability represents POSIX capabilities
+                                          type
+                                        type: string
+                                      type: array
+                                  type: object
+                                privileged:
+                                  description: |-
+                                    Run container in privileged mode.
+                                    Processes in privileged containers are essentially equivalent to root on the host.
+                                    Defaults to false.
+                                    Note that this field cannot be set when spec.os.name is windows.
+                                  type: boolean
+                                procMount:
+                                  description: |-
+                                    procMount denotes the type of proc mount to use for the containers.
+                                    The default is DefaultProcMount which uses the container runtime defaults for
+                                    readonly paths and masked paths.
+                                    This requires the ProcMountType feature flag to be enabled.
+                                    Note that this field cannot be set when spec.os.name is windows.
+                                  type: string
+                                readOnlyRootFilesystem:
+                                  description: |-
+                                    Whether this container has a read-only root filesystem.
+                                    Default is false.
+                                    Note that this field cannot be set when spec.os.name is windows.
+                                  type: boolean
+                                runAsGroup:
+                                  description: |-
+                                    The GID to run the entrypoint of the container process.
+                                    Uses runtime default if unset.
+                                    May also be set in PodSecurityContext. If set in both SecurityContext and
+                                    PodSecurityContext, the value specified in SecurityContext takes precedence.
+                                    Note that this field cannot be set when spec.os.name is windows.
+                                  format: int64
+                                  type: integer
+                                runAsNonRoot:
+                                  description: |-
+                                    Indicates that the container must run as a non-root user.
+                                    If true, the Kubelet will validate the image at runtime to ensure that it
+                                    does not run as UID 0 (root) and fail to start the container if it does.
+                                    If unset or false, no such validation will be performed.
+                                    May also be set in PodSecurityContext. If set in both SecurityContext and
+                                    PodSecurityContext, the value specified in SecurityContext takes precedence.
+                                  type: boolean
+                                runAsUser:
+                                  description: |-
+                                    The UID to run the entrypoint of the container process.
+                                    Defaults to user specified in image metadata if unspecified.
+                                    May also be set in PodSecurityContext.
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' 
+                                      type: string
+                                    port:
+                                      anyOf:
+                                      - type: integer
+                                      - type: string
+                                      description: |-
+                                        Number or name of the port to access on the container.
+                                        Number must be in the range 1 to 65535.
+                                        Name must be an IANA_SVC_NAME.
+                                      x-kubernetes-int-or-string: true
+                                  required:
+                                  - port
+                                  type: object
+                                terminationGracePeriodSeconds:
+                                  description: |-
+                                    Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                                    The grace period is the duration in seconds after the processes running in the pod are sent
+                                    a termination signal and the time when the processes are forcibly halted with a kill signal.
+                                    Set this value longer than the expected cleanup time for your process.
+                                    If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                                    value overrides the value provided by the pod spec.
+                                    Value must be non-negative integer. The value zero indicates stop immediately via
+                                    the kill signal (no opportunity to shut down).
+                                    This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                                    Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                                  format: int64
+                                  type: integer
+                                timeoutSeconds:
+                                  description: |-
+                                    Number of seconds after which the probe times out.
+                                    Defaults to 1 second. Minimum value is 1.
+                                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                                  format: int32
+                                  type: integer
+                              type: object
+                            stdin:
+                              description: |-
+                                Whether this container should allocate a buffer for stdin in the container runtime. If this
+                                is not set, reads from stdin in the container will always result in EOF.
+                                Default is false.
+                              type: boolean
+                            stdinOnce:
+                              description: |-
+                                Whether the container runtime should close the stdin channel after it has been opened by
+                                a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                                sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                                first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                                at which time stdin is closed and remains closed until the container is restarted. If this
+                                flag is false, a container process that reads from stdin will never receive an EOF.
+                                Default is false.
+                              type: boolean
+                            targetContainerName:
+                              description: |-
+                                If set, the name of the container from PodSpec that this ephemeral container targets.
+                                The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+                                If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+
+                                The container runtime must implement support for this feature. If the runtime does not
+                                support namespace targeting then the result of setting this field is undefined.
+                              type: string
+                            terminationMessagePath:
+                              description: |-
+                                Optional: Path at which the file to which the container's termination message
+                                will be written is mounted into the container's filesystem.
+                                Message written is intended to be brief final status, such as an assertion failure message.
+                                Will be truncated by the node if greater than 4096 bytes. The total message length across
+                                all containers will be limited to 12kb.
+                                Defaults to /dev/termination-log.
+                                Cannot be updated.
+                              type: string
+                            terminationMessagePolicy:
+                              description: |-
+                                Indicate how the termination message should be populated. File will use the contents of
+                                terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. This is only valid for non-hostNetwork pods. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. 
+                      type: boolean
+                    hostPID:
+                      description: |-
+                        Use the host's pid namespace.
+                        Optional: Default to false.
+                      type: boolean
+                    hostUsers:
+                      description: |-
+                        Use the host's user namespace.
+                        Optional: Default to true.
+                        If set to true or not present, the pod will be run in the host user namespace, useful
+                        for when the pod needs a feature only available to the host user namespace, such as
+                        loading a kernel module with CAP_SYS_MODULE.
+                        When set to false, a new userns is created for the pod. Setting false is useful for
+                        mitigating container breakout vulnerabilities while still allowing users to run their
+                        containers as root without actually having root privileges on the host.
+                        This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+                      type: boolean
+                    hostname:
+                      description: |-
+                        Specifies the hostname of the Pod.
+                        If not specified, the pod's hostname will be set to a system-defined value.
+                      type: string
+                    imagePullSecrets:
+                      description: |-
+                        ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+                        If specified, these secrets will be passed to individual puller implementations for them to use.
+                        More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+                      items:
+                        description: |-
+                          LocalObjectReference contains enough information to let you locate the
+                          referenced object inside the same namespace.
+                        properties:
+                          name:
+                            description: |-
+                              Name of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                              TODO: Add other useful fields. apiVersion, kind, uid?
+                            type: string
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      type: array
+                    initContainers:
+                      description: |-
+                        List of initialization containers belonging to the pod.
+                        Init containers are executed in order prior to containers being started. If any
+                        init container fails, the pod is considered to have failed and is handled according
+                        to its restartPolicy. The name for an init container or normal container must be
+                        unique among all containers.
+                        Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+                        The resourceRequirements of an init container are taken into account during scheduling
+                        by finding the highest request/limit for each resource type, and then using the max
+                        of that value or the sum of the normal containers. Limits are applied to init containers
+                        in a similar fashion.
+                        Init containers cannot currently be added or removed.
+                        Cannot be updated.
+                        More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+                      items:
+                        description: A single application container that you want to
+                          run within a pod.
+                        properties:
+                          args:
+                            description: |-
+                              Arguments to the entrypoint.
+                              The container image's CMD is used if this is not provided.
+                              Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                              cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                              to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                              produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                              of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
+                              properties:
+                                postStart:
+                                  description: |-
+                                    PostStart is called immediately after a container is created. If the handler fails,
+                                    the container is terminated and restarted according to its restart policy.
+                                    Other management of the container blocks until the hook completes.
+                                    More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+                                  properties:
+                                    exec:
+                                      description: Exec specifies the action to take.
+                                      properties:
+                                        command:
+                                          description: |-
+                                            Command is the command line to execute inside the container, the working directory for the
+                                            command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+                                            not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+                                            a shell, you need to explicitly call out to that shell.
+                                            Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+                                          items:
+                                            type: string
+                                          type: array
+                                      type: object
+                                    httpGet:
+                                      description: HTTPGet specifies the http request
+                                        to perform.
+                                      properties:
+                                        host:
+                                          description: |-
+                                            Host name to connect to, defaults to the pod IP. You probably want to set
+                                            "Host" in httpHeaders instead.
+                                          type: string
+                                        httpHeaders:
+                                          description: Custom headers to set in the request.
+                                            HTTP allows repeated headers.
+                                          items:
+                                            description: HTTPHeader describes a custom
+                                              header to be used in HTTP probes
+                                            properties:
+                                              name:
+                                                description: |-
+                                                  The header field name.
+                                                  This will be canonicalized upon output, so case-variant names will be understood as the same header.
+                                                type: string
+                                              value:
+                                                description: The header field value
+                                                type: string
+                                            required:
+                                            - name
+                                            - value
+                                            type: object
+                                          type: array
+                                        path:
+                                          description: Path to access on the HTTP server.
+                                          type: string
+                                        port:
+                                          anyOf:
+                                          - type: integer
+                                          - type: string
+                                          description: |-
+                                            Name or number of the port to access on the container.
+                                            Number must be in the range 1 to 65535.
+                                            Name must be an IANA_SVC_NAME.
+                                          x-kubernetes-int-or-string: true
+                                        scheme:
+                                          description: |-
+                                            Scheme to use for connecting to the host.
+                                            Defaults to HTTP.
+                                          type: string
+                                      required:
+                                      - port
+                                      type: object
+                                    tcpSocket:
+                                      description: |-
+                                        Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+                                        for backward compatibility. There is no validation of this field and
+                                        lifecycle hooks will fail at runtime when a tcp handler is specified.
+                                      properties:
+                                        host:
+                                          description: 'Optional: Host name to connect
+                                            to, defaults to the pod IP.'
+                                          type: string
+                                        port:
+                                          anyOf:
+                                          - type: integer
+                                          - type: string
+                                          description: |-
+                                            Number or name of the port to access on the container.
+                                            Number must be in the range 1 to 65535.
+                                            Name must be an IANA_SVC_NAME.
+                                          x-kubernetes-int-or-string: true
+                                      required:
+                                      - port
+                                      type: object
+                                  type: object
+                                preStop:
+                                  description: |-
+                                    PreStop is called immediately before a container is terminated due to an
+                                    API request or management event such as liveness/startup probe failure,
+                                    preemption, resource contention, etc. The handler is not called if the
+                                    container crashes or exits. The Pod's termination grace period countdown begins before the
+                                    PreStop hook is executed. Regardless of the outcome of the handler, the
+                                    container will eventually terminate within the Pod's termination grace
+                                    period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+                                    or until the termination grace period is reached.
+                                    More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+                                  properties:
+                                    exec:
+                                      description: Exec specifies the action to take.
+                                      properties:
+                                        command:
+                                          description: |-
+                                            Command is the command line to execute inside the container, the working directory for the
+                                            command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+                                            not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+                                            a shell, you need to explicitly call out to that shell.
+                                            Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+                                          items:
+                                            type: string
+                                          type: array
+                                      type: object
+                                    httpGet:
+                                      description: HTTPGet specifies the http request
+                                        to perform.
+                                      properties:
+                                        host:
+                                          description: |-
+                                            Host name to connect to, defaults to the pod IP. You probably want to set
+                                            "Host" in httpHeaders instead.
+                                          type: string
+                                        httpHeaders:
+                                          description: Custom headers to set in the request.
+                                            HTTP allows repeated headers.
+                                          items:
+                                            description: HTTPHeader describes a custom
+                                              header to be used in HTTP probes
+                                            properties:
+                                              name:
+                                                description: |-
+                                                  The header field name.
+                                                  This will be canonicalized upon output, so case-variant names will be understood as the same header.
+                                                type: string
+                                              value:
+                                                description: The header field value
+                                                type: string
+                                            required:
+                                            - name
+                                            - value
+                                            type: object
+                                          type: array
+                                        path:
+                                          description: Path to access on the HTTP server.
+                                          type: string
+                                        port:
+                                          anyOf:
+                                          - type: integer
+                                          - type: string
+                                          description: |-
+                                            Name or number of the port to access on the container.
+                                            Number must be in the range 1 to 65535.
+                                            Name must be an IANA_SVC_NAME.
+                                          x-kubernetes-int-or-string: true
+                                        scheme:
+                                          description: |-
+                                            Scheme to use for connecting to the host.
+                                            Defaults to HTTP.
+                                          type: string
+                                      required:
+                                      - port
+                                      type: object
+                                    tcpSocket:
+                                      description: |-
+                                        Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+                                        for backward compatibility. There is no validation of this field and
+                                        lifecycle hooks will fail at runtime when a tcp handler is specified.
+                                      properties:
+                                        host:
+                                          description: 'Optional: Host name to connect
+                                            to, defaults to the pod IP.'
+                                          type: string
+                                        port:
+                                          anyOf:
+                                          - type: integer
+                                          - type: string
+                                          description: |-
+                                            Number or name of the port to access on the container.
+                                            Number must be in the range 1 to 65535.
+                                            Name must be an IANA_SVC_NAME.
+                                          x-kubernetes-int-or-string: true
+                                      required:
+                                      - port
+                                      type: object
+                                  type: object
+                              type: object
+                            livenessProbe:
+                              description: |-
+                                Periodic probe of container liveness.
+                                Container will be restarted if the probe fails.
+                                Cannot be updated.
+                                More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                              properties:
+                                exec:
+                                  description: Exec specifies the action to take.
+                                  properties:
+                                    command:
+                                      description: |-
+                                        Command is the command line to execute inside the container, the working directory for the
+                                        command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+                                        not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+                                        a shell, you need to explicitly call out to that shell.
+                                        Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+                                      items:
+                                        type: string
+                                      type: array
+                                  type: object
+                                failureThreshold:
+                                  description: |-
+                                    Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                                    Defaults to 3. Minimum value is 1.
+ format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. 
Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. 
+ Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated.
+ type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: |- + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + the scheduler simply schedules this pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. 
+ Some pod and container fields are restricted if this is set. + + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. 
+ type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to a pod + condition + properties: + conditionType: + description: ConditionType refers to a condition in the + pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. 
+ type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + + + This is a beta feature enabled by the PodSchedulingReadiness feature gate. + items: + description: PodSchedulingGate is associated to a Pod to guard + its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. 
+ If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. 
+ type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. 
A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure managed + data disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1".
+ type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. 
+ + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. 
+ items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. 
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes.
+ format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serviceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + storages: + description: List of possible filesystems supported by this container + profile + items: + description: |- + NnfContainerProfileStorage defines the mount point information that will be available to the + container + properties: + name: + description: 'Name specifies the name of the mounted filesystem; + must match the user supplied #DW directive' + type: string + optional: + default: false + description: |- + Optional designates that this filesystem is available to be mounted, but can be ignored by + the user not supplying this filesystem in the #DW directives + type: boolean + pvcMode: + description: |- + For DW_GLOBAL_ (global lustre) storages, the access mode must match what is configured in + the LustreFilesystem resource for the namespace. Defaults to `ReadWriteMany` for global + lustre, otherwise empty. + type: string + required: + - name + - optional + type: object + type: array + userID: + description: |- + UserID specifies the user ID that is allowed to use this profile. If this is specified, only + Workflows that have a matching user ID can select this profile. + format: int32 + type: integer + required: + - retryLimit + type: object + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + required: + - data + type: object + served: true storage: true diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml index e919f3fbb..6dc856307 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml @@ -7389,6 +7389,7384 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: True if manager readied all resources + jsonPath: .status.ready + name: READY + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfDataMovementManager is the Schema for the nnfdatamovementmanagers + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfDataMovementManagerSpec defines the desired state of NnfDataMovementManager + properties: + hostPath: + description: Host Path defines the directory location of shared mounts + on an individual worker node. + type: string + mountPath: + description: Mount Path defines the location within the container + at which the Host Path volume should be mounted. + type: string + selector: + description: |- + Selector defines the pod selector used in scheduling the worker nodes. This value is duplicated + to the template.spec.metadata.labels to satisfy the requirements of the worker's Daemon Set. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + template: + description: |- + Template defines the pod template that is used for the basis of the worker Daemon Set that + manages the per node data movement operations. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of
+ resources, in this case pods.
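+ # Illustrative sketch (not generated by controller-gen; the label key below
+ # is hypothetical): a nodeAffinity stanza of the shape validated above,
+ # pinning worker pods to labeled NNF nodes.
+ #   affinity:
+ #     nodeAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         nodeSelectorTerms:
+ #         - matchExpressions:
+ #           - key: cray.nnf.node
+ #             operator: In
+ #             values:
+ #             - "true"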
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources,
+ in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements.
The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone,
+ etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of
+ resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources,
+ in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
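+ # Illustrative sketch (hypothetical labels, not generated output): a
+ # podAntiAffinity term matching the schema above, spreading worker pods
+ # across nodes by hostname.
+ #   podAntiAffinity:
+ #     requiredDuringSchedulingIgnoredDuringExecution:
+ #     - labelSelector:
+ #         matchLabels:
+ #           app: nnf-dm-worker
+ #       topologyKey: kubernetes.io/hostname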
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. 
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
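+ # Illustrative sketch (hypothetical names, not generated output): env entries
+ # of the shapes defined above, a literal value and a downward-API fieldRef.
+ #   env:
+ #   - name: LOG_LEVEL
+ #     value: debug
+ #   - name: NODE_NAME
+ #     valueFrom:
+ #       fieldRef:
+ #         fieldPath: spec.nodeName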
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
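+ # Illustrative sketch (hypothetical command, not generated output): a minimal
+ # exec livenessProbe using the fields defined above.
+ #   livenessProbe:
+ #     exec:
+ #       command: ["/bin/sh", "-c", "test -d /mnt/nnf"]
+ #     failureThreshold: 3
+ #     periodSeconds: 10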
+ format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
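+ # Illustrative sketch (hypothetical port name, not generated output): an
+ # httpGet readinessProbe matching the probe schema above.
+ #   readinessProbe:
+ #     httpGet:
+ #       path: /readyz
+ #       port: health
+ #     initialDelaySeconds: 5
+ #     periodSeconds: 10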
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. 
Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
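+ # Illustrative sketch (not generated output; whether the data-movement worker
+ # can drop these privileges is deployment-specific): a restrictive container
+ # securityContext using the fields defined above.
+ #   securityContext:
+ #     allowPrivilegeEscalation: false
+ #     capabilities:
+ #       drop: ["ALL"]
+ #     runAsNonRoot: true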
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. 
+ Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. 
+ type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. 
+ items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. 
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. 
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
+ properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. 
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. 
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. This is only valid for non-hostNetwork pods. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. 
Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields.
apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. 
+ Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. 
If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. 
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: |- + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + the scheduler simply schedules this pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. 
+ + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + + + This is a beta feature enabled by the PodSchedulingReadiness feature gate. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. 
+ May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. 
+ items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values.
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. 
+ + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field.
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). 
+ type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. 
+ properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. 
If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false.
+ type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string
+ optional:
+ description: optional field specifies whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to service account user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. 
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: vsphereVolume represents a vSphere volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage Policy
+ Based Management (SPBM) profile ID associated
+ with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage Policy
+ Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ required:
+ - containers
+ type: object
+ type: object
+ updateStrategy:
+ description: |-
+ UpdateStrategy defines the UpdateStrategy that is used for the basis of the worker Daemon Set
+ that manages the per node data movement operations.
+ properties:
+ rollingUpdate:
+ description: |-
+ Rolling update config params. Present only if type = "RollingUpdate".
+ ---
+ TODO: Update this to follow our convention for oneOf, whatever we decide it
+ to be. Same as Deployment `strategy.rollingUpdate`.
+ See https://github.com/kubernetes/kubernetes/issues/35345
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of nodes with an existing available DaemonSet pod that
+ can have an updated DaemonSet pod during an update.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ This can not be 0 if MaxUnavailable is 0.
+ Absolute number is calculated from percentage by rounding up to a minimum of 1.
+ Default value is 0.
+ Example: when this is set to 30%, at most 30% of the total number of nodes
+ that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ can have a new pod created before the old pod is marked as deleted.
+ The update starts by launching new pods on 30% of nodes. Once an updated
+ pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
+ on that node is marked deleted. If the old pod becomes unavailable for any
+ reason (Ready transitions to false, is evicted, or is drained) an updated
+ pod is immediately created on that node without considering surge limits.
+ Allowing surge implies the possibility that the resources consumed by the
+ daemonset on any given node can double if the readiness check fails, and
+ so resource intensive daemonsets should take into account that they may
+ cause evictions during disruption.
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of DaemonSet pods that can be unavailable during the
+ update. Value can be an absolute number (ex: 5) or a percentage of total
+ number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ number is calculated from percentage by rounding up.
+ This cannot be 0 if MaxSurge is 0
+ Default value is 1.
+ Example: when this is set to 30%, at most 30% of the total number of nodes
+ that should be running the daemon pod (i.e.
status.desiredNumberScheduled)
+ can have their pods stopped for an update at any given time. The update
+ starts by stopping at most 30% of those DaemonSet pods and then brings
+ up new DaemonSet pods in their place. Once the new pods are available,
+ it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ 70% of original number of DaemonSet pods are available at all times during
+ the update.
+ x-kubernetes-int-or-string: true
+ type: object
+ type:
+ description: Type of daemon set update. Can be "RollingUpdate"
+ or "OnDelete". Default is RollingUpdate.
+ type: string
+ type: object
+ required:
+ - hostPath
+ - mountPath
+ - selector
+ - template
+ - updateStrategy
+ type: object
+ status:
+ description: NnfDataMovementManagerStatus defines the observed state of
+ NnfDataMovementManager
+ properties:
+ ready:
+ default: false
+ description: |-
+ Ready indicates that the Data Movement Manager has achieved the desired readiness state
+ and all managed resources are initialized.
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
storage: true
subresources:
status: {}
diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml
index b7dd77510..475817b3e 100644
--- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml
+++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml
@@ -138,5 +138,131 @@ spec:
type: object
type: object
served: true
+ storage: false
+ subresources: {}
+ - additionalPrinterColumns:
+ - description: True if this is the default instance
+ jsonPath: .data.default
+ name: DEFAULT
+ type: boolean
+ - jsonPath: .metadata.creationTimestamp
+ name: AGE
+ type: date
+ name: v1alpha2
+ schema:
+ openAPIV3Schema:
+ description: NnfDataMovementProfile is the Schema for the nnfdatamovementprofiles
+ API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ data:
+ description: NnfDataMovementProfileData defines the desired state of NnfDataMovementProfile
+ properties:
+ command:
+ default: ulimit -n 2048 && mpirun --allow-run-as-root --hostfile $HOSTFILE
+ dcp --progress 1 --uid $UID --gid $GID $SRC $DEST
+ description: |-
+ Command to execute to perform data movement. $VARS are replaced by the nnf software and must
+ be present in the command.
+ Available $VARS:
+ HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the
+ slots/max_slots for each host. This hostfile is created at `/tmp//hostfile`
+ UID: User ID that is inherited from the Workflow
+ GID: Group ID that is inherited from the Workflow
+ SRC: source for the data movement
+ DEST: destination for the data movement
+ type: string
+ createDestDir:
+ default: true
+ description: |-
+ CreateDestDir will ensure that the destination directory exists before performing data
+ movement. This will cause a number of stat commands to determine the source and destination
+ file types, so that the correct pathing for the destination can be determined. Then, a mkdir
+ is issued.
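+ # Illustrative sketch only (not produced by controller-gen; the expanded values
+ # here are hypothetical): after $VARS substitution, the default command above
+ # might run as something like
+ #   ulimit -n 2048 && mpirun --allow-run-as-root --hostfile /tmp/<name>/hostfile \
+ #     dcp --progress 1 --uid 1050 --gid 1051 /lus/global/src /lus/global/dest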
+ type: boolean + default: + default: false + description: Default is true if this instance is the default resource + to use + type: boolean + logStdout: + default: false + description: |- + If true, enable the command's stdout to be saved in the log when the command completes + successfully. On failure, the output is always logged. + type: boolean + maxSlots: + default: 0 + description: |- + MaxSlots is the number of max_slots specified in the MPI hostfile. A value of 0 disables the + use of max_slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. + minimum: 0 + type: integer + pinned: + default: false + description: Pinned is true if this instance is an immutable copy + type: boolean + progressIntervalSeconds: + default: 5 + description: |- + NnfDataMovement resources have the ability to collect and store the progress percentage and the + last few lines of output in the CommandStatus field. This number is used for the interval to collect + the progress data. `dcp --progress N` must be included in the data movement command in order for + progress to be collected. A value of 0 disables this functionality. + minimum: 0 + type: integer + slots: + default: 8 + description: |- + Slots is the number of slots specified in the MPI hostfile. A value of 0 disables the use of + slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. + minimum: 0 + type: integer + statCommand: + default: mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- + setpriv --euid $UID --egid $GID --clear-groups stat --cached never + -c '%F' $PATH + description: |- + If CreateDestDir is true, then use StatCommand to perform the stat commands. + Use setpriv to stat the path with the specified UID/GID. + Available $VARS: + HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + slots/max_slots for each host. This hostfile is created at + `/tmp//hostfile`. This is the same hostfile used as the one for Command. + UID: User ID that is inherited from the Workflow + GID: Group ID that is inherited from the Workflow + PATH: Path to stat + type: string + storeStdout: + default: false + description: |- + Similar to logStdout, store the command's stdout in Status.Message when the command completes + successfully. On failure, the output is always stored. + type: boolean + required: + - command + - createDestDir + - maxSlots + - slots + - statCommand + type: object + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ type: object
+ served: true
storage: true
subresources: {}
diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml
index fa67389e4..1a525b6bb 100644
--- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml
+++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml
@@ -417,6 +417,412 @@ spec:
type: object
type: object
served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Current state
+ jsonPath: .status.state
+ name: STATE
+ type: string
+ - description: Status of current state
+ jsonPath: .status.status
+ name: STATUS
+ type: string
+ - jsonPath: .status.error.severity
+ name: ERROR
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: AGE
+ type: date
+ name: v1alpha2
+ schema:
+ openAPIV3Schema:
+ description: NnfDataMovement is the Schema for the nnfdatamovements API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NnfDataMovementSpec defines the desired state of NnfDataMovement
+ properties:
+ cancel:
+ default: false
+ description: Set to true if the data movement operation should be
+ canceled.
+ type: boolean
+ destination:
+ description: Destination describes the destination of the data movement
+ operation
+ properties:
+ path:
+ description: Path describes the location of the user data relative
+ to the storage instance
+ type: string
+ storageReference:
+ description: |-
+ Storage describes the storage backing this data movement specification; Storage can reference
+ either NNF storage or global Lustre storage depending on the object reference's Kind field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ TODO: this design is not final and this field is subject to change in the future.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ groupId:
+ description: |-
+ Group Id specifies the group ID for the data movement operation. This value is used
+ in conjunction with the user ID to ensure the user has valid permissions to perform
+ the data movement operation.
+ format: int32
+ type: integer
+ profileReference:
+ description: |-
+ ProfileReference is an object reference to an NnfDataMovementProfile that is used to
+ configure data movement. If empty, the default profile is used.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ TODO: this design is not final and this field is subject to change in the future.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ source:
+ description: Source describes the source of the data movement operation
+ properties:
+ path:
+ description: Path describes the location of the user data relative
+ to the storage instance
+ type: string
+ storageReference:
+ description: |-
+ Storage describes the storage backing this data movement specification; Storage can reference
+ either NNF storage or global Lustre storage depending on the object reference's Kind field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
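+ # Informal note (the kinds and names here are examples, not validated by this
+ # schema): the source storageReference typically points at either an NnfStorage
+ # or a global-lustre LustreFileSystem resource, distinguished by its Kind, e.g.
+ #   storageReference: {kind: LustreFileSystem, name: mylustre, namespace: default}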
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: object + userConfig: + description: |- + User defined configuration on how data movement should be performed. This overrides the + configuration defined in the supplied ProfileReference/NnfDataMovementProfile. These values + are typically set by the Copy Offload API. + properties: + dcpOptions: + description: Extra options to pass to the dcp command (used to + perform data movement). + type: string + dryrun: + default: false + description: |- + Fake the Data Movement operation. The system "performs" Data Movement but the command to do so + is trivial. This means a Data Movement request is still submitted but the IO is skipped. + type: boolean + logStdout: + default: false + description: |- + If true, enable the command's stdout to be saved in the log when the command completes + successfully. On failure, the output is always logged. + Note: Enabling this option may degrade performance. + type: boolean + maxSlots: + description: |- + The number of max_slots specified in the MPI hostfile. A value of 0 disables the use of slots + in the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. + type: integer + mpirunOptions: + description: Extra options to pass to the mpirun command (used + to perform data movement). + type: string + slots: + description: |- + The number of slots specified in the MPI hostfile. A value of 0 disables the use of slots in + the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. + type: integer + storeStdout: + default: false + description: |- + Similar to LogStdout, store the command's stdout in Status.Message when the command completes + successfully. On failure, the output is always stored. + Note: Enabling this option may degrade performance. 
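+ # Hypothetical sketch (field names come from this schema, the values are
+ # invented): a Copy Offload request might override the referenced profile for a
+ # single operation with
+ #   userConfig: {dryrun: false, dcpOptions: "--sparse", slots: 16, storeStdout: true}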
+ type: boolean
+ type: object
+ userId:
+ description: |-
+ User Id specifies the user ID for the data movement operation. This value is used
+ in conjunction with the group ID to ensure the user has valid permissions to perform
+ the data movement operation.
+ format: int32
+ type: integer
+ type: object
+ status:
+ description: NnfDataMovementStatus defines the observed state of NnfDataMovement
+ properties:
+ commandStatus:
+ description: |-
+ CommandStatus reflects the current status of the underlying Data Movement command
+ as it executes. The command status is polled at a certain frequency to avoid excessive
+ updates to the Data Movement resource.
+ properties:
+ command:
+ description: The command that was executed during data movement.
+ type: string
+ data:
+ description: |-
+ Data is parsed from the dcp output when the command is finished. This is the total amount of
+ data copied by dcp.
+ type: string
+ directories:
+ description: |-
+ Directories is parsed from the dcp output when the command is finished. This is the number of
+ directories that dcp copied. Note: This value may be inflated due to NNF index mount
+ directories when copying from XFS or GFS2 filesystems.
+ format: int32
+ type: integer
+ elapsedTime:
+ description: ElapsedTime reflects the elapsed time since the underlying
+ data movement command started.
+ type: string
+ files:
+ description: |-
+ Files is parsed from the dcp output when the command is finished. This is the number of files
+ that dcp copied.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ Items is parsed from the dcp output when the command is finished. This is a total of
+ the number of directories, files, and links that dcp copied.
+ format: int32
+ type: integer
+ lastMessage:
+ description: |-
+ LastMessage reflects the last message received over standard output or standard error as
+ captured by the underlying data movement command.
+ type: string
+ lastMessageTime:
+ description: |-
+ LastMessageTime reflects the time at which the last message was received over standard output
+ or standard error by the underlying data movement command.
+ format: date-time
+ type: string
+ links:
+ description: |-
+ Links is parsed from the dcp output when the command is finished. This is the number of links
+ that dcp copied.
+ format: int32
+ type: integer
+ progress:
+ description: |-
+ ProgressPercentage reflects the progress of the underlying data movement command as captured from
+ standard output. A best effort is made to parse the command output as a percentage. If no
+ progress has been measured yet then this field is omitted. If the latest command output does
+ not contain a valid percentage, then the value is unchanged from the previously parsed value.
+ format: int32
+ type: integer
+ rate:
+ description: |-
+ Rate is parsed from the dcp output when the command is finished. This is the transfer rate of the
+ data copied by dcp.
+ type: string
+ seconds:
+ description: Seconds is parsed from the dcp output when the command
+ is finished.
+ type: string
+ type: object
+ endTime:
+ description: EndTime reflects the time at which the Data Movement
+ operation ended.
+ format: date-time
+ type: string
+ error:
+ description: Error information
+ properties:
+ debugMessage:
+ description: Internal debug message for the error
+ type: string
+ severity:
+ description: |-
+ Indication of how severe the error is. Minor will likely succeed, Major may
+ succeed, and Fatal will never succeed.
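+ # Informal example: an error that is expected to clear on a retry is reported
+ # as Minor, one that may or may not clear as Major, and one that can never
+ # succeed without intervention as Fatal.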
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + message: + description: |- + Message contains any text that explains the Status. If Data Movement failed or storeStdout is + enabled, this will contain the command's output. + type: string + restarts: + description: Restarts contains the number of restarts of the Data + Movement operation. + type: integer + startTime: + description: StartTime reflects the time at which the Data Movement + operation started. + format: date-time + type: string + state: + description: Current state of data movement. + enum: + - Starting + - Running + - Finished + type: string + status: + description: Status of the current state. + enum: + - Success + - Failed + - Invalid + - Cancelled + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml b/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml index 319dcf335..a995a99ab 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml @@ -196,6 +196,277 @@ spec: will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ error:
+ description: Error information
+ properties:
+ debugMessage:
+ description: Internal debug message for the error
+ type: string
+ severity:
+ description: |-
+ Indication of how severe the error is. Minor will likely succeed, Major may
+ succeed, and Fatal will never succeed.
+ enum:
+ - Minor
+ - Major
+ - Fatal
+ type: string
+ type:
+ description: Internal or user error
+ enum:
+ - Internal
+ - User
+ - WLM
+ type: string
+ userMessage:
+ description: Optional user facing message if the error is relevant
+ to an end user
+ type: string
+ required:
+ - debugMessage
+ - severity
+ - type
+ type: object
+ fsNameNext:
+ description: FsNameNext is the next available fsname that hasn't been
+ used
+ maxLength: 8
+ minLength: 8
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - name: v1alpha2
+ schema:
+ openAPIV3Schema:
+ description: NnfLustreMGT is the Schema for the nnflustremgts API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NnfLustreMGTSpec defines the desired state of NnfLustreMGT
+ properties:
+ addresses:
+ description: Addresses is the list of LNet addresses for the MGT
+ items:
+ type: string
+ type: array
+ claimList:
+ description: ClaimList is the list of currently in use fsnames
+ items:
+ description: |-
+ ObjectReference contains enough information to let you inspect or modify the referred object.
+ ---
+ New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+ 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
+ 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
+ restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+ Those cannot be well described when embedded.
+ 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+ 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
+ during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
+ and the version of the actual struct is irrelevant.
+ 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type
+ will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
+
+
+ Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+ For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ TODO: this design is not final and this field is subject to change in the future.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ fsNameBlackList:
+ description: |-
+ FsNameBlackList is a list of fsnames that can't be used. This may be
+ necessary if the MGT hosts file systems external to Rabbit
+ items:
+ type: string
+ type: array
+ fsNameStart:
+ description: FsNameStart is the starting fsname to be used
+ maxLength: 8
+ minLength: 8
+ type: string
+ fsNameStartReference:
+ description: |-
+ FsNameStartReference can be used to add a configmap where the starting fsname is
+ stored. If this reference is set, it takes precedence over FsNameStart. The configmap
+ will be updated with the next available fsname anytime an fsname is used.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ TODO: this design is not final and this field is subject to change in the future.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - addresses + type: object + status: + description: NnfLustreMGTStatus defines the current state of NnfLustreMGT + properties: + claimList: + description: ClaimList is the list of currently in use fsnames + items: + properties: + fsname: + type: string + reference: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . 
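+ # Informal example of a status claim entry (the fsname, kind, and names are
+ # hypothetical):
+ #   - fsname: aaaaaaab
+ #     reference: {kind: NnfStorage, name: my-fs, namespace: default}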
properties:
diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml
index 836c7486f..a5fb8371c 100644
--- a/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml
+++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml
@@ -174,6 +174,169 @@ spec:
type: object
type: object
served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - jsonPath: .status.ready
+ name: READY
+ type: string
+ - jsonPath: .status.error.severity
+ name: ERROR
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: AGE
+ type: date
+ name: v1alpha2
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ NnfNodeBlockStorageSpec defines the desired storage attributes on an NNF Node.
+ Storage specs are created at the request of the user and fulfilled by the NNF Node Controller.
+ properties:
+ allocations:
+ description: Allocations is the list of storage allocations to make
+ items:
+ properties:
+ access:
+ description: List of nodes where /dev devices should be created
+ items:
+ type: string
+ type: array
+ capacity:
+ description: Aggregate capacity of the block devices for each
+ allocation
+ format: int64
+ type: integer
+ type: object
+ type: array
+ sharedAllocation:
+ description: SharedAllocation is used when a single NnfNodeBlockStorage
+ allocation is used by multiple NnfNodeStorage allocations
+ type: boolean
+ required:
+ - sharedAllocation
+ type: object
+ status:
+ properties:
+ allocations:
+ description: Allocations is the list of storage allocations that were
+ made
+ items:
+ properties:
+ accesses:
+ additionalProperties:
+ properties:
+ devicePaths:
+ description: /dev paths for each of the block devices
+ items:
+ type: string
+ type: array
+ storageGroupId:
+ description: Redfish ID for the storage group
+ type: string
+ type: object
+ description: Accesses is a map of node name to the access status
+ type: object
+ capacityAllocated:
+ description: |-
+ Total capacity allocated for the storage. This may differ from the requested storage
+ capacity as the system may round up the requested capacity to satisfy underlying
+ storage requirements (i.e. block size / stripe size).
+ format: int64
+ type: integer
+ devices:
+ description: List of NVMe namespaces used by this allocation
+ items:
+ properties:
+ NQN:
+ description: NQN of the base NVMe device
+ type: string
+ capacityAllocated:
+ description: |-
+ Total capacity allocated for the storage. This may differ from the requested storage
+ capacity as the system may round up the requested capacity to satisfy underlying
+ storage requirements (i.e. block size / stripe size).
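+ # e.g. (illustrative numbers only): a 1000000000-byte request may be rounded up
+ # to a multiple of the device block size, so capacityAllocated can exceed the
+ # requested capacity.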
+ format: int64
+ type: integer
+ namespaceId:
+ description: Id of the Namespace on the NVMe device (e.g.,
+ "2")
+ type: string
+ required:
+ - NQN
+ - namespaceId
+ type: object
+ type: array
+ storagePoolId:
+ description: Redfish ID for the storage pool
+ type: string
+ type: object
+ type: array
+ error:
+ description: Error information
+ properties:
+ debugMessage:
+ description: Internal debug message for the error
+ type: string
+ severity:
+ description: |-
+ Indication of how severe the error is. Minor will likely succeed, Major may
+ succeed, and Fatal will never succeed.
+ enum:
+ - Minor
+ - Major
+ - Fatal
+ type: string
+ type:
+ description: Internal or user error
+ enum:
+ - Internal
+ - User
+ - WLM
+ type: string
+ userMessage:
+ description: Optional user facing message if the error is relevant
+ to an end user
+ type: string
+ required:
+ - debugMessage
+ - severity
+ - type
+ type: object
+ podStartTime:
+ description: |-
+ PodStartTime is the value of pod.status.containerStatuses[].state.running.startedAt from the pod that did
+ the last successful full reconcile of the NnfNodeBlockStorage. This is used to tell whether the /dev paths
+ listed in the status section are from the current boot of the node.
+ format: date-time
+ type: string
+ ready:
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
storage: true
subresources:
status: {}
diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml
index e9ca518a7..a014def37 100644
--- a/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml
+++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml
@@ -15,6 +15,46 @@ spec:
scope: Namespaced
versions:
- name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: NnfNodeECData is the Schema for the nnfnodeecdata API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfNodeECDataSpec defines the desired state of NnfNodeECData + type: object + status: + description: NnfNodeECDataStatus defines the observed state of NnfNodeECData + properties: + data: + additionalProperties: + additionalProperties: + type: string + type: object + type: object + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v1alpha2 schema: openAPIV3Schema: description: NnfNodeECData is the Schema for the nnfnodeecdata API diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml index f8794e31e..181abe442 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml @@ -171,6 +171,166 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Current desired state + jsonPath: .spec.state + name: STATE + type: string + - description: Health of node + jsonPath: .status.health + name: HEALTH + type: string + - description: Current status of node + jsonPath: .status.status + name: STATUS + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - description: Parent pod name + jsonPath: .spec.pod + name: POD + priority: 1 + type: string + name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfNode is the Schema for the NnfNode API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfNodeSpec defines the desired state of NNF Node + properties: + name: + description: The unique name for this NNF Node + type: string + pod: + description: Pod name for this NNF Node + type: string + state: + description: State reflects the desired state of this NNF Node resource + enum: + - Enable + - Disable + type: string + required: + - state + type: object + status: + description: NnfNodeStatus defines the observed status of NNF Node + properties: + capacity: + format: int64 + type: integer + capacityAllocated: + format: int64 + type: integer + drives: + items: + description: NnfDriveStatus defines the observe status of drives + connected to this NNF Node + properties: + capacity: + description: |- + Capacity in bytes of the device. The full capacity may not + be usable depending on what the storage driver can provide. + format: int64 + type: integer + firmwareVersion: + description: The firmware version of this storage controller. + type: string + health: + description: NnfResourceHealthType defines the health of an + NNF resource. + type: string + id: + description: ID reflects the NNF Node unique identifier for + this NNF Server resource. 
+ type: string + model: + description: Model is the manufacturer information about the + device + type: string + name: + description: Name reflects the common name of this NNF Server + resource. + type: string + serialNumber: + description: The serial number for this storage controller. + type: string + slot: + description: Physical slot location of the storage controller. + type: string + status: + description: NnfResourceStatusType is the string that indicates + the resource's status + type: string + wearLevel: + description: WearLevel in percent for SSDs + format: int64 + type: integer + type: object + type: array + fenced: + description: Fenced is true when the NNF Node is fenced by the STONITH + agent, and false otherwise. + type: boolean + health: + description: NnfResourceHealthType defines the health of an NNF resource. + type: string + lnetNid: + description: LNetNid is the LNet address for the NNF node + type: string + servers: + items: + description: NnfServerStatus defines the observed status of servers + connected to this NNF Node + properties: + health: + description: NnfResourceHealthType defines the health of an + NNF resource. + type: string + hostname: + type: string + id: + description: ID reflects the NNF Node unique identifier for + this NNF Server resource. + type: string + name: + description: Name reflects the common name of this NNF Server + resource. + type: string + status: + description: NnfResourceStatusType is the string that indicates + the resource's status + type: string + type: object + type: array + status: + description: Status reflects the current status of the NNF Node + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml index 919fe2efe..a5986b0ad 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml @@ -170,7 +170,225 @@ spec: type: integer required: - count - - fileSystemType + - groupID + - sharedAllocation + - userID + type: object + status: + description: NnfNodeStorageStatus defines the status for NnfNodeStorage + properties: + allocations: + description: Allocations is the list of storage allocations that were + made + items: + description: NnfNodeStorageAllocationStatus defines the allocation + status for each allocation in the NnfNodeStorage + properties: + logicalVolume: + description: Name of the LVM LV + type: string + ready: + type: boolean + volumeGroup: + description: Name of the LVM VG + type: string + type: object + type: array + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. 
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + type: boolean + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.ready + name: READY + type: string + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfNodeStorage is the Schema for the NnfNodeStorage API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + NnfNodeStorageSpec defines the desired storage attributes on a NNF Node. + Storage specs are created at the behest of the user and fulfilled by the NNF Node Controller. + properties: + blockReference: + description: BlockReference is an object reference to an NnfNodeBlockStorage + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + capacity: + description: Capacity of an individual allocation + format: int64 + type: integer + count: + description: |- + Count is the number of allocations to make on this node. All of the allocations will + be created with the same parameters + minimum: 0 + type: integer + fileSystemType: + default: raw + description: |- + FileSystemType defines the type of the desired filesystem, or raw + block device. + enum: + - raw + - lvm + - zfs + - xfs + - gfs2 + - lustre + type: string + groupID: + description: Group ID for file system + format: int32 + type: integer + lustreStorage: + description: |- + LustreStorageSpec describes the Lustre target created here, if + FileSystemType specifies a Lustre target. + properties: + backFs: + description: BackFs is the type of backing filesystem to use. + enum: + - ldiskfs + - zfs + type: string + fileSystemName: + description: FileSystemName is the fsname parameter for the Lustre + filesystem. + maxLength: 8 + type: string + mgsAddress: + description: |- + MgsAddress is the NID of the MGS to use. This is used only when + creating MDT and OST targets. + type: string + startIndex: + description: |- + StartIndex is used to order a series of MDTs or OSTs. This is used only + when creating MDT and OST targets. If count in the NnfNodeStorageSpec is more + than 1, then StartIndex is the index of the first allocation, and the indexes + increment from there. + minimum: 0 + type: integer + targetType: + description: TargetType is the type of Lustre target to be created. + enum: + - mgt + - mdt + - mgtmdt + - ost + type: string + type: object + sharedAllocation: + description: SharedAllocation is used when a single NnfNodeBlockStorage + allocation is used by multiple NnfNodeStorage allocations + type: boolean + userID: + description: User ID for file system + format: int32 + type: integer + required: + - count - groupID - sharedAllocation - userID diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml index 6558cf344..f82f6c97a 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml @@ -248,6 +248,243 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfPortManager is the Schema for the nnfportmanagers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfPortManagerSpec defines the desired state of NnfPortManager + properties: + allocations: + description: |- + Allocations is a list of allocation requests that the Port Manager will attempt + to satisfy. 
To request port resources from the port manager, clients should add + an entry to the allocations. Entries must be unique. The port manager controller + will attempt to allocate port resources for each allocation specification in the + list. To remove an allocation and free up port resources, remove the allocation + from the list. + items: + description: NnfPortManagerAllocationSpec defines the desired state + for a single port allocation + properties: + count: + default: 1 + description: |- + Count is the number of desired ports the requester needs. The port manager + will attempt to allocate this many ports. + type: integer + requester: + description: Requester is an object reference to the requester + of the ports. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - count + - requester + type: object + type: array + systemConfiguration: + description: |- + SystemConfiguration is an object reference to the system configuration. The + Port Manager will use the available ports defined in the system configuration. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future.
+ type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - allocations + - systemConfiguration + type: object + status: + description: NnfPortManagerStatus defines the observed state of NnfPortManager + properties: + allocations: + description: Allocations is a list of port allocation statuses. + items: + description: NnfPortManagerAllocationStatus defines the allocation + status of a port for a given requester. + properties: + ports: + description: Ports is the list of ports allocated to the owning + resource. + items: + type: integer + type: array + requester: + description: |- + Requester is an object reference to the requester of the port resource, if one exists, or + empty otherwise. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + status: + description: Status is the ownership status of the port.
+ enum: + - InUse + - Free + - Cooldown + - InvalidConfiguration + - InsufficientResources + type: string + timeUnallocated: + description: |- + TimeUnallocated is when the port was unallocated. This is to ensure the proper cooldown + duration. + format: date-time + type: string + required: + - status + type: object + type: array + status: + description: Status is the current status of the port manager. + enum: + - Ready + - SystemConfigurationNotFound + type: string + required: + - status + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml index 3eb56816b..09c7ee982 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml @@ -593,5 +593,586 @@ spec: type: object type: object served: true + storage: false + subresources: {} + - additionalPrinterColumns: + - description: True if this is the default instance + jsonPath: .data.default + name: DEFAULT + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfStorageProfile is the Schema for the nnfstorageprofiles API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + data: + description: NnfStorageProfileData defines the desired state of NnfStorageProfile + properties: + default: + default: false + description: Default is true if this instance is the default resource + to use + type: boolean + gfs2Storage: + description: GFS2Storage defines the GFS2-specific configuration + properties: + capacityScalingFactor: + default: "1.0" + description: CapacityScalingFactor is a scaling factor for the + capacity requested in the DirectiveBreakdown + type: string + commandlines: + description: CmdLines contains commands to create volumes and + filesystems. + properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object + lvCreate: + description: LvCreate specifies the lvcreate commandline, + minus the "lvcreate". + type: string + lvRemove: + description: LvRemove specifies the lvremove commandline, + minus the "lvremove". + type: string + mkfs: + description: Mkfs specifies the mkfs commandline, minus the + "mkfs". + type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string + pvCreate: + description: PvCreate specifies the pvcreate commandline, + minus the "pvcreate". + type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove".
+ type: string + sharedVg: + default: false + description: |- + SharedVg specifies that allocations from a workflow on the same Rabbit should share an + LVM VolumeGroup + type: boolean + vgChange: + description: VgChange specifies the various vgchange commandlines, + minus the "vgchange" + properties: + lockStart: + description: The vgchange commandline for lockStart, minus + the "vgchange" command + type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string + type: object + vgCreate: + description: VgCreate specifies the vgcreate commandline, + minus the "vgcreate". + type: string + vgRemove: + description: VgRemove specifies the vgremove commandline, + minus the "vgremove". + type: string + type: object + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + type: object + lustreStorage: + description: LustreStorage defines the Lustre-specific configuration + properties: + capacityMdt: + default: 5GiB + description: |- + CapacityMDT specifies the size of the MDT device. This is also + used for a combined MGT+MDT device. + pattern: ^\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$ + type: string + capacityMgt: + default: 5GiB + description: CapacityMGT specifies the size of the MGT device. + pattern: ^\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$ + type: string + capacityScalingFactor: + default: "1.0" + description: CapacityScalingFactor is a scaling factor for the + OST capacity requested in the DirectiveBreakdown + type: string + combinedMgtMdt: + default: false + description: CombinedMGTMDT indicates whether the MGT and MDT + should be created on the same target device + type: boolean + exclusiveMdt: + default: false + description: ExclusiveMDT indicates that the MDT should not be + colocated with any other target on the chosen server. + type: boolean + externalMgs: + description: |- + ExternalMGS specifies the use of an existing MGS rather than creating one. This can + be either the NID(s) of a pre-existing MGS that should be used, or it can be an NNF Persistent + Instance that was created with the "StandaloneMGTPoolName" option. In the latter case, the format + is "pool:poolName" where "poolName" is the argument from "StandaloneMGTPoolName". A single MGS will + be picked from the pool. + type: string + mdtCommandlines: + description: MdtCmdLines contains commands to create an MDT target. + properties: + mkfs: + description: |- + Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + type: string + mountTarget: + description: |- + MountTarget specifies the mount command line for the lustre target. + For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + argument to mkfs.lustre instead. + type: string + zpoolCreate: + description: |- + ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + This is where you may specify zpool create options, and the virtual device (vdev) such as + "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mdtOptions: + description: MdtOptions contains options to use for libraries + used for an MDT target.
+ properties: + colocateComputes: + default: false + description: |- + ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + to the compute nodes in a workflow + type: boolean + count: + description: Count specifies how many Lustre targets to create + minimum: 1 + type: integer + scale: + description: Scale provides a unitless value to determine + how many Lustre targets to create + maximum: 10 + minimum: 1 + type: integer + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + required: + - colocateComputes + type: object + mgtCommandlines: + description: MgtCmdLines contains commands to create an MGT target. + properties: + mkfs: + description: |- + Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + type: string + mountTarget: + description: |- + MountTarget specifies the mount command line for the lustre target. + For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + argument to mkfs.lustre instead. + type: string + zpoolCreate: + description: |- + ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + This is where you may specify zpool create options, and the virtual device (vdev) such as + "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mgtMdtCommandlines: + description: MgtMdtCmdLines contains commands to create a combined + MGT/MDT target. + properties: + mkfs: + description: |- + Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + type: string + mountTarget: + description: |- + MountTarget specifies the mount command line for the lustre target. + For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + argument to mkfs.lustre instead. + type: string + zpoolCreate: + description: |- + ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + This is where you may specify zpool create options, and the virtual device (vdev) such as + "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mgtMdtOptions: + description: MgtMdtOptions contains options to use for libraries + used for a combined MGT/MDT target. + properties: + colocateComputes: + default: false + description: |- + ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + to the compute nodes in a workflow + type: boolean + count: + description: Count specifies how many Lustre targets to create + minimum: 1 + type: integer + scale: + description: Scale provides a unitless value to determine + how many Lustre targets to create + maximum: 10 + minimum: 1 + type: integer + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. 
This restricts allocations to Storage resources with these labels + items: + type: string + type: array + required: + - colocateComputes + type: object + mgtOptions: + description: MgtOptions contains options to use for libraries + used for an MGT target. + properties: + colocateComputes: + default: false + description: |- + ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + to the compute nodes in a workflow + type: boolean + count: + description: Count specifies how many Lustre targets to create + minimum: 1 + type: integer + scale: + description: Scale provides a unitless value to determine + how many Lustre targets to create + maximum: 10 + minimum: 1 + type: integer + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + required: + - colocateComputes + type: object + mountCompute: + description: MountCompute specifies mount options for making the + Lustre client mount on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for making the + Lustre client mount on the Rabbit. + type: string + ostCommandlines: + description: OstCmdLines contains commands to create an OST target. + properties: + mkfs: + description: |- + Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + type: string + mountTarget: + description: |- + MountTarget specifies the mount command line for the lustre target. + For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + argument to mkfs.lustre instead. + type: string + zpoolCreate: + description: |- + ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + This is where you may specify zpool create options, and the virtual device (vdev) such as + "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + ostOptions: + description: OstOptions contains options to use for libraries + used for an OST target. + properties: + colocateComputes: + default: false + description: |- + ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + to the compute nodes in a workflow + type: boolean + count: + description: Count specifies how many Lustre targets to create + minimum: 1 + type: integer + scale: + description: Scale provides a unitless value to determine + how many Lustre targets to create + maximum: 10 + minimum: 1 + type: integer + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + required: + - colocateComputes + type: object + standaloneMgtPoolName: + description: |- + StandaloneMGTPoolName creates a Lustre MGT without a MDT or OST. This option can only be used when creating + a persistent Lustre instance. The MGS is placed into a named pool that can be used by the "ExternalMGS" option. + Multiple pools can be created. 
+ type: string + type: object + pinned: + default: false + description: Pinned is true if this instance is an immutable copy + type: boolean + rawStorage: + description: RawStorage defines the Raw-specific configuration + properties: + capacityScalingFactor: + default: "1.0" + description: CapacityScalingFactor is a scaling factor for the + capacity requested in the DirectiveBreakdown + type: string + commandlines: + description: CmdLines contains commands to create volumes and + filesystems. + properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object + lvCreate: + description: LvCreate specifies the lvcreate commandline, + minus the "lvcreate". + type: string + lvRemove: + description: LvRemove specifies the lvremove commandline, + minus the "lvremove". + type: string + mkfs: + description: Mkfs specifies the mkfs commandline, minus the + "mkfs". + type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string + pvCreate: + description: PvCreate specifies the pvcreate commandline, + minus the "pvcreate". + type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove". + type: string + sharedVg: + default: false + description: |- + SharedVg specifies that allocations from a workflow on the same Rabbit should share an + LVM VolumeGroup + type: boolean + vgChange: + description: VgChange specifies the various vgchange commandlines, + minus the "vgchange" + properties: + lockStart: + description: The vgchange commandline for lockStart, minus + the "vgchange" command + type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string + type: object + vgCreate: + description: VgCreate specifies the vgcreate commandline, + minus the "vgcreate". + type: string + vgRemove: + description: VgRemove specifies the vgremove commandline, + minus the "vgremove". + type: string + type: object + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + type: object + xfsStorage: + description: XFSStorage defines the XFS-specific configuration + properties: + capacityScalingFactor: + default: "1.0" + description: CapacityScalingFactor is a scaling factor for the + capacity requested in the DirectiveBreakdown + type: string + commandlines: + description: CmdLines contains commands to create volumes and + filesystems. + properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object + lvCreate: + description: LvCreate specifies the lvcreate commandline, + minus the "lvcreate".
+ type: string + lvRemove: + description: LvRemove specifies the lvremove commandline, + minus the "lvremove". + type: string + mkfs: + description: Mkfs specifies the mkfs commandline, minus the + "mkfs". + type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string + pvCreate: + description: PvCreate specifies the pvcreate commandline, + minus the "pvcreate". + type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove". + type: string + sharedVg: + default: false + description: |- + SharedVg specifies that allocations from a workflow on the same Rabbit should share an + LVM VolumeGroup + type: boolean + vgChange: + description: VgChange specifies the various vgchange commandlines, + minus the "vgchange" + properties: + lockStart: + description: The vgchange commandline for lockStart, minus + the "vgchange" command + type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string + type: object + vgCreate: + description: VgCreate specifies the vgcreate commandline, + minus the "vgcreate". + type: string + vgRemove: + description: VgRemove specifies the vgremove commandline, + minus the "vgremove". + type: string + type: object + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + type: object + required: + - gfs2Storage + - lustreStorage + - rawStorage + - xfsStorage + type: object + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + type: object + served: true storage: true subresources: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml index aa976e9f3..08042c1d9 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml @@ -190,7 +190,301 @@ spec: type: integer required: - allocationSets - - fileSystemType + - groupID + - userID + type: object + status: + description: NnfStorageStatus defines the observed status of NNF Storage. + properties: + allocationSets: + description: |- + AllocationSets holds the status information for each of the AllocationSets + from the spec. + items: + description: NnfStorageAllocationSetStatus contains the status information + for an allocation set + properties: + allocationCount: + description: |- + AllocationCount is the total number of allocations that currently + exist + type: integer + ready: + type: boolean + required: + - allocationCount + type: object + type: array + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed.
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + fileSystemName: + description: FileSystemName is the fsname parameter for the Lustre + filesystem. + maxLength: 8 + type: string + lustreMgtReference: + description: |- + LustreMgtReference is an object reference to the NnfLustreMGT resource used + by the NnfStorage + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + mgsAddress: + description: MgsAddress is the NID of the MGS. + type: string + ready: + description: Ready reflects the status of this NNF Storage + type: boolean + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.ready + name: READY + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.error.severity + name: ERROR + type: string + name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfStorage is the Schema for the storages API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + NnfStorageSpec defines the specification for requesting generic storage on a set + of available NNF Nodes. This object is related to a #DW for NNF Storage, with the WLM + making the determination for which NNF Nodes it wants to utilize. + properties: + allocationSets: + description: |- + AllocationSets is a list of different types of storage allocations to make. Each + AllocationSet describes an entire allocation spanning multiple Rabbits. For example, + an AllocationSet could be all of the OSTs in a Lustre filesystem, or all of the raw + block devices in a raw block configuration. + items: + description: NnfStorageAllocationSetSpec defines the details for + an allocation set + properties: + backFs: + description: BackFs is the type of backing filesystem to use. + enum: + - ldiskfs + - zfs + type: string + capacity: + description: |- + Capacity defines the capacity, in bytes, of this storage specification. The NNF Node itself + may split the storage among the available drives operating in the NNF Node. + format: int64 + type: integer + mgsAddress: + description: |- + MgsAddress is the NID of the MGS when a pre-existing MGS is + provided in the NnfStorageProfile + type: string + name: + description: Name is a human readable label for this set of + allocations (e.g., xfs) + type: string + nodes: + description: Nodes is the list of Rabbit nodes to make allocations + on + items: + description: NnfStorageAllocationNodes identifies the node + and properties of the allocation to make on that node + properties: + count: + description: Number of allocations to make on this node + type: integer + name: + description: Name of the node to make the allocation on + type: string + required: + - count + - name + type: object + type: array + persistentMgsReference: + description: |- + PersistentMgsReference is a reference to a persistent storage that is providing + the external MGS. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + sharedAllocation: + description: |- + SharedAllocation shares a single block storage allocation between multiple file system allocations + (within the same workflow) on a Rabbit + type: boolean + targetType: + description: TargetType is the type of Lustre target to be created. + enum: + - mgt + - mdt + - mgtmdt + - ost + type: string + required: + - capacity + - name + - nodes + - sharedAllocation + type: object + type: array + fileSystemType: + default: raw + description: |- + FileSystemType defines the type of the desired filesystem, or raw + block device. + enum: + - raw + - lvm + - zfs + - xfs + - gfs2 + - lustre + type: string + groupID: + description: Group ID for file system + format: int32 + type: integer + userID: + description: User ID for file system + format: int32 + type: integer + required: + - allocationSets - groupID - userID type: object diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml index eb9f0595a..3e942d6fb 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml @@ -200,10 +200,248 @@ spec: type: string required: - capacity - - computesTarget - makeClientMounts - storageProfile - - type + type: object + status: + description: NnfSystemStorageStatus defines the observed state of NnfSystemStorage + properties: + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + description: Ready signifies whether all work has been completed + type: boolean + required: + - ready + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v1alpha2 + schema: + openAPIV3Schema: + description: NnfSystemStorage is the Schema for the nnfsystemstorages API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfSystemStorageSpec defines the desired state of NnfSystemStorage + properties: + capacity: + default: 1073741824 + description: Capacity is the allocation size on each Rabbit + format: int64 + type: integer + clientMountPath: + description: ClientMountPath is an optional path for where to mount + the file system on the computes + type: string + computesPattern: + description: |- + ComputesPattern is a list of compute node indexes (0-15) to make the storage accessible to. This + is only used if ComputesTarget is "pattern" + items: + type: integer + maxItems: 16 + type: array + computesTarget: + default: all + description: ComputesTarget specifies which computes to make the storage + accessible to + enum: + - all + - even + - odd + - pattern + type: string + excludeComputes: + description: |- + ExcludeComputes is a list of compute nodes to exclude from the compute nodes listed in the + SystemConfiguration + items: + type: string + type: array + excludeDisabledRabbits: + default: false + description: |- + ExcludeDisabledRabbits looks at the Storage resource for a Rabbit and does not use it if it's + marked as "disabled" + type: boolean + excludeRabbits: + description: ExcludeRabbits is a list of Rabbits to exclude from the + Rabbits in the SystemConfiguration + items: + type: string + type: array + includeComputes: + description: |- + IncludeComputes is a list of compute nodes to use rather than getting the list of compute nodes + from the SystemConfiguration + items: + type: string + type: array + includeRabbits: + description: |- + IncludeRabbits is a list of Rabbits to use rather than getting the list of Rabbits from the + SystemConfiguration + items: + type: string + type: array + makeClientMounts: + default: false + description: |- + MakeClientMounts specifies whether to make ClientMount resources or just + make the devices available to the client + type: boolean + storageProfile: + description: StorageProfile is an object reference to the storage + profile to use + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + systemConfiguration: + description: |- + SystemConfiguration is an object reference to the SystemConfiguration resource to use. If this + field is empty, name: default namespace: default is used. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: + default: raw + description: Type is the file system type to use for the storage allocation + enum: + - raw + - xfs + - gfs2 + type: string + required: + - capacity + - makeClientMounts + - storageProfile type: object status: description: NnfSystemStorageStatus defines the observed state of NnfSystemStorage diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index d7cc1edea..f719fb868 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -21,35 +21,41 @@ resources: patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
# patches here are for enabling the conversion webhook for each CRD -#- path: patches/webhook_in_nodes.yaml -#- path: patches/webhook_in_nodestorages.yaml -#- path: patches/webhook_in_storages.yaml -#- path: patches/webhook_in_datamovementmanagers.yaml -#- path: patches/webhook_in_datamovements.yaml -#- path: patches/webhook_in_nnfaccesses.yaml -#- path: patches/webhook_in_nnfstorageprofiles.yaml -#- path: patches/webhook_in_nnfnodeecdata.yaml -#- path: patches/webhook_in_nnfcontainerprofiles.yaml -#- path: patches/webhook_in_nnfportmanagers.yaml -#- path: patches/webhook_in_nnflustremgts.yaml -#- path: patches/webhook_in_nnfdatamovementprofiles.yaml +- path: patches/webhook_in_nnfnodes.yaml +- path: patches/webhook_in_nnfnodestorages.yaml +- path: patches/webhook_in_nnfstorages.yaml +- path: patches/webhook_in_nnfdatamovementmanagers.yaml +- path: patches/webhook_in_nnfdatamovements.yaml +- path: patches/webhook_in_nnfaccesses.yaml +- path: patches/webhook_in_nnfstorageprofiles.yaml +- path: patches/webhook_in_nnfnodeecdata.yaml +- path: patches/webhook_in_nnfcontainerprofiles.yaml +- path: patches/webhook_in_nnfportmanagers.yaml +- path: patches/webhook_in_nnflustremgts.yaml +- path: patches/webhook_in_nnfdatamovementprofiles.yaml +#- path: patches/webhook_in_nnfnodes.yaml +- path: patches/webhook_in_nnfnodeblockstorages.yaml +#- path: patches/webhook_in_nnfnodestorages.yaml +#- path: patches/webhook_in_nnfstorages.yaml +- path: patches/webhook_in_nnfsystemstorages.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD -#- path: patches/cainjection_in_nodes.yaml -#- path: patches/cainjection_in_nodestorages.yaml -#- path: patches/cainjection_in_storages.yaml -#- path: patches/cainjection_in_datamovementmanagers.yaml -#- path: patches/cainjection_in_datamovements.yaml -#- path: patches/cainjection_in_nnfaccesses.yaml -#- path: patches/cainjection_in_nnfstorageprofiles.yaml -#- path: patches/cainjection_in_nnfnodeecdata.yaml -#- path: patches/cainjection_in_nnfcontainerprofiles.yaml -#- path: patches/cainjection_in_nnfportmanagers.yaml -#- path: patches/cainjection_in_nnflustremgts.yaml -#- path: patches/cainjection_in_nnfdatamovementprofiles.yaml -#- path: patches/cainjection_in_nnfsystemstorages.yaml +- path: patches/cainjection_in_nnfnodes.yaml +- path: patches/cainjection_in_nnfnodestorages.yaml +- path: patches/cainjection_in_nnfstorages.yaml +- path: patches/cainjection_in_nnfdatamovementmanagers.yaml +#- path: patches/cainjection_in_nnfdatamovements.yaml +- path: patches/cainjection_in_nnfaccesses.yaml +- path: patches/cainjection_in_nnfstorageprofiles.yaml +- path: patches/cainjection_in_nnfnodeecdata.yaml +- path: patches/cainjection_in_nnfcontainerprofiles.yaml +- path: patches/cainjection_in_nnfportmanagers.yaml +- path: patches/cainjection_in_nnflustremgts.yaml +- path: patches/cainjection_in_nnfdatamovementprofiles.yaml +- path: patches/cainjection_in_nnfsystemstorages.yaml +- path: patches/cainjection_in_nnfnodeblockstorages.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
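The conversion webhook patches enabled above route each CRD's `/convert` endpoint to the manager's webhook service. They only take effect if each multi-version kind implements controller-runtime's conversion interfaces, with the storage version (v1alpha2 here) acting as the hub, as described in the kubebuilder multi-version tutorial referenced earlier. Below is a minimal sketch of the spoke side, using NnfNode as the example kind; the field copies are illustrative stand-ins for the conversion code that crd-bumper generates, not the code in this change.

```go
// Sketch only: api/v1alpha1/nnfnode_conversion.go (illustrative, not generated code)
package v1alpha1

import (
	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// The hub version (v1alpha2) satisfies conversion.Hub with an empty
// Hub() method on its type; only the spoke side is shown here.

// ConvertTo converts this v1alpha1 NnfNode to the v1alpha2 hub version.
func (src *NnfNode) ConvertTo(dstRaw conversion.Hub) error {
	dst := dstRaw.(*nnfv1alpha2.NnfNode)
	dst.ObjectMeta = src.ObjectMeta
	// Illustrative field copy; the real conversion covers the full Spec and Status.
	dst.Spec.Name = src.Spec.Name
	return nil
}

// ConvertFrom converts the v1alpha2 hub version back to this v1alpha1 version.
func (dst *NnfNode) ConvertFrom(srcRaw conversion.Hub) error {
	src := srcRaw.(*nnfv1alpha2.NnfNode)
	dst.ObjectMeta = src.ObjectMeta
	dst.Spec.Name = src.Spec.Name
	return nil
}
```

Once the kind is registered with the manager's webhook builder, the webhook server serves these conversions on the `/convert` path that the patches above configure.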
diff --git a/config/crd/patches/cainjection_in_datamovementmanagers.yaml b/config/crd/patches/cainjection_in_nnfdatamovementmanagers.yaml similarity index 100% rename from config/crd/patches/cainjection_in_datamovementmanagers.yaml rename to config/crd/patches/cainjection_in_nnfdatamovementmanagers.yaml diff --git a/config/crd/patches/cainjection_in_datamovements.yaml b/config/crd/patches/cainjection_in_nnfdatamovements.yaml similarity index 100% rename from config/crd/patches/cainjection_in_datamovements.yaml rename to config/crd/patches/cainjection_in_nnfdatamovements.yaml diff --git a/config/crd/patches/cainjection_in_nnflustremgts.yaml b/config/crd/patches/cainjection_in_nnflustremgts.yaml new file mode 100644 index 000000000..fcbbca69a --- /dev/null +++ b/config/crd/patches/cainjection_in_nnflustremgts.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: nnflustremgts.nnf.cray.hpe.com diff --git a/config/crd/patches/cainjection_in_nnfnodeblockstorages.yaml b/config/crd/patches/cainjection_in_nnfnodeblockstorages.yaml new file mode 100644 index 000000000..69e1c634b --- /dev/null +++ b/config/crd/patches/cainjection_in_nnfnodeblockstorages.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: nnfnodeblockstorages.nnf.cray.hpe.com diff --git a/config/crd/patches/cainjection_in_nodecontrollers.yaml b/config/crd/patches/cainjection_in_nnfnodes.yaml similarity index 100% rename from config/crd/patches/cainjection_in_nodecontrollers.yaml rename to config/crd/patches/cainjection_in_nnfnodes.yaml diff --git a/config/crd/patches/cainjection_in_nodestorages.yaml b/config/crd/patches/cainjection_in_nnfnodestorages.yaml similarity index 100% rename from config/crd/patches/cainjection_in_nodestorages.yaml rename to config/crd/patches/cainjection_in_nnfnodestorages.yaml diff --git a/config/crd/patches/cainjection_in_storages.yaml b/config/crd/patches/cainjection_in_nnfstorages.yaml similarity index 100% rename from config/crd/patches/cainjection_in_storages.yaml rename to config/crd/patches/cainjection_in_nnfstorages.yaml diff --git a/config/crd/patches/cainjection_in_nnfsystemstorages.yaml b/config/crd/patches/cainjection_in_nnfsystemstorages.yaml new file mode 100644 index 000000000..dc41a6616 --- /dev/null +++ b/config/crd/patches/cainjection_in_nnfsystemstorages.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: nnfsystemstorages.nnf.cray.hpe.com diff --git a/config/crd/patches/webhook_in_datamovementmanagers.yaml b/config/crd/patches/webhook_in_nnfdatamovementmanagers.yaml similarity index 100% rename from config/crd/patches/webhook_in_datamovementmanagers.yaml rename to config/crd/patches/webhook_in_nnfdatamovementmanagers.yaml diff --git a/config/crd/patches/webhook_in_datamovements.yaml b/config/crd/patches/webhook_in_nnfdatamovements.yaml similarity index 100% rename from config/crd/patches/webhook_in_datamovements.yaml 
rename to config/crd/patches/webhook_in_nnfdatamovements.yaml diff --git a/config/crd/patches/webhook_in_nnflustremgts.yaml b/config/crd/patches/webhook_in_nnflustremgts.yaml new file mode 100644 index 000000000..65529af39 --- /dev/null +++ b/config/crd/patches/webhook_in_nnflustremgts.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: nnflustremgts.nnf.cray.hpe.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_nnfnodeblockstorages.yaml b/config/crd/patches/webhook_in_nnfnodeblockstorages.yaml new file mode 100644 index 000000000..19634cff3 --- /dev/null +++ b/config/crd/patches/webhook_in_nnfnodeblockstorages.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: nnfnodeblockstorages.nnf.cray.hpe.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_nodecontrollers.yaml b/config/crd/patches/webhook_in_nnfnodes.yaml similarity index 89% rename from config/crd/patches/webhook_in_nodecontrollers.yaml rename to config/crd/patches/webhook_in_nnfnodes.yaml index 6926f7a48..60b97363a 100644 --- a/config/crd/patches/webhook_in_nodecontrollers.yaml +++ b/config/crd/patches/webhook_in_nnfnodes.yaml @@ -12,3 +12,5 @@ spec: namespace: system name: webhook-service path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_nodestorages.yaml b/config/crd/patches/webhook_in_nnfnodestorages.yaml similarity index 89% rename from config/crd/patches/webhook_in_nodestorages.yaml rename to config/crd/patches/webhook_in_nnfnodestorages.yaml index d7fd4f8e4..ba9ddace8 100644 --- a/config/crd/patches/webhook_in_nodestorages.yaml +++ b/config/crd/patches/webhook_in_nnfnodestorages.yaml @@ -12,3 +12,5 @@ spec: namespace: system name: webhook-service path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_storages.yaml b/config/crd/patches/webhook_in_nnfstorages.yaml similarity index 89% rename from config/crd/patches/webhook_in_storages.yaml rename to config/crd/patches/webhook_in_nnfstorages.yaml index a950d3d98..9d3c37bda 100644 --- a/config/crd/patches/webhook_in_storages.yaml +++ b/config/crd/patches/webhook_in_nnfstorages.yaml @@ -12,3 +12,5 @@ spec: namespace: system name: webhook-service path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_nnfsystemstorages.yaml b/config/crd/patches/webhook_in_nnfsystemstorages.yaml new file mode 100644 index 000000000..3736c6561 --- /dev/null +++ b/config/crd/patches/webhook_in_nnfsystemstorages.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: nnfsystemstorages.nnf.cray.hpe.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/dp0/manager_volumes_patch.yaml b/config/dp0/manager_volumes_patch.yaml index a7a2233f5..bee1a3573 100644 
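Each `webhook_in_*.yaml` patch above adds the same `spec.conversion` stanza: strategy `Webhook`, the in-cluster `webhook-service` at path `/convert`, and `conversionReviewVersions: [v1]`, which declares that the webhook understands `apiextensions.k8s.io/v1` ConversionReview payloads. For reference, the same stanza expressed with the apiextensions v1 Go types (a sketch only; the repository applies it declaratively through kustomize):

```go
// Sketch: the conversion stanza the webhook_in_*.yaml patches add, built with
// the apiextensions/v1 Go types. The repository applies this via kustomize;
// this function exists only for reference.
package main

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/utils/ptr"
)

func conversionStanza() *apiextensionsv1.CustomResourceConversion {
	return &apiextensionsv1.CustomResourceConversion{
		Strategy: apiextensionsv1.WebhookConverter,
		Webhook: &apiextensionsv1.WebhookConversion{
			ClientConfig: &apiextensionsv1.WebhookClientConfig{
				Service: &apiextensionsv1.ServiceReference{
					Namespace: "system", // rewritten to the deployed namespace by kustomize
					Name:      "webhook-service",
					Path:      ptr.To("/convert"),
				},
			},
			// The API server sends apiextensions.k8s.io/v1 ConversionReview objects.
			ConversionReviewVersions: []string{"v1"},
		},
	}
}
```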
--- a/config/dp0/manager_volumes_patch.yaml +++ b/config/dp0/manager_volumes_patch.yaml @@ -10,6 +10,7 @@ spec: - name: manager args: - --controller=node + - --deleteUnknownVolumes volumeMounts: - mountPath: /mnt name: mnt-dir diff --git a/config/examples-htx/nnfstorageprofile_patch.yaml b/config/examples-htx/nnfstorageprofile_patch.yaml index 298c85fc0..d731eb9b1 100644 --- a/config/examples-htx/nnfstorageprofile_patch.yaml +++ b/config/examples-htx/nnfstorageprofile_patch.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfStorageProfile metadata: name: template diff --git a/config/examples/kustomization.yaml b/config/examples/kustomization.yaml index e66e08414..20c9b235c 100644 --- a/config/examples/kustomization.yaml +++ b/config/examples/kustomization.yaml @@ -1,9 +1,9 @@ namespace: nnf-system resources: -- nnf_v1alpha1_nnfcontainerprofiles.yaml -- nnf_v1alpha1_nnfdatamovementprofile.yaml -- nnf_v1alpha1_nnfstorageprofile.yaml +- nnf_nnfcontainerprofiles.yaml +- nnf_nnfdatamovementprofile.yaml +- nnf_nnfstorageprofile.yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization diff --git a/config/examples/nnf_v1alpha1_nnfcontainerprofiles.yaml b/config/examples/nnf_nnfcontainerprofiles.yaml similarity index 93% rename from config/examples/nnf_v1alpha1_nnfcontainerprofiles.yaml rename to config/examples/nnf_nnfcontainerprofiles.yaml index 80d8c8035..10714ef07 100644 --- a/config/examples/nnf_v1alpha1_nnfcontainerprofiles.yaml +++ b/config/examples/nnf_nnfcontainerprofiles.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfContainerProfile metadata: name: example-success @@ -20,7 +20,7 @@ data: - -c - "sleep 10 && exit 0" --- -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfContainerProfile metadata: name: example-fail @@ -34,7 +34,7 @@ data: - -c - "sleep 10 && exit 1" --- -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfContainerProfile metadata: name: example-randomly-fail @@ -59,7 +59,7 @@ data: echo "exiting: $x" exit $x --- -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfContainerProfile metadata: name: example-forever @@ -79,7 +79,7 @@ data: - -c - "while true; do date && sleep 5; done" --- -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfContainerProfile metadata: name: example-mpi @@ -117,7 +117,7 @@ data: image: nnf-mfu:latest --- -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfContainerProfile metadata: name: example-mpi-fail @@ -145,7 +145,7 @@ data: - name: example-mpi-fail image: nnf-mfu:latest --- -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfContainerProfile metadata: name: example-mpi-webserver diff --git a/config/examples/nnf_v1alpha1_nnfdatamovementprofile.yaml b/config/examples/nnf_nnfdatamovementprofile.yaml similarity index 92% rename from config/examples/nnf_v1alpha1_nnfdatamovementprofile.yaml rename to config/examples/nnf_nnfdatamovementprofile.yaml index 53aa2d4fa..a67eb6dee 100644 --- a/config/examples/nnf_v1alpha1_nnfdatamovementprofile.yaml +++ b/config/examples/nnf_nnfdatamovementprofile.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfDataMovementProfile metadata: name: template diff --git a/config/examples/nnf_v1alpha1_nnfstorageprofile.yaml 
b/config/examples/nnf_nnfstorageprofile.yaml similarity index 98% rename from config/examples/nnf_v1alpha1_nnfstorageprofile.yaml rename to config/examples/nnf_nnfstorageprofile.yaml index a793a0efc..03686d700 100644 --- a/config/examples/nnf_v1alpha1_nnfstorageprofile.yaml +++ b/config/examples/nnf_nnfstorageprofile.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfStorageProfile metadata: name: template diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 852ed6621..956cd755e 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -18,4 +18,4 @@ kind: Kustomization images: - name: controller newName: ghcr.io/nearnodeflash/nnf-sos - newTag: 0.1.12 + newTag: 0.1.14 diff --git a/config/ports/port_manager.yaml b/config/ports/port_manager.yaml index 2b7bcad12..134416d03 100644 --- a/config/ports/port_manager.yaml +++ b/config/ports/port_manager.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha1 +apiVersion: nnf.cray.hpe.com/v1alpha2 kind: NnfPortManager metadata: name: port-manager @@ -6,4 +6,4 @@ spec: systemConfiguration: name: default namespace: default - allocations: [] \ No newline at end of file + allocations: [] diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index e8a278470..01308185a 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -31,6 +31,30 @@ configurations: # default, aiding admins in cluster management. Those roles are # not used by the Project itself. You can comment the following lines # if you do not want those helpers be installed with your Project. +- nnfstorageprofile_editor_role.yaml +- nnfstorageprofile_viewer_role.yaml +- nnfstorage_editor_role.yaml +- nnfstorage_viewer_role.yaml +- nnfportmanager_editor_role.yaml +- nnfportmanager_viewer_role.yaml +- nnfnodestorage_editor_role.yaml +- nnfnodestorage_viewer_role.yaml +- nnfnodeecdata_editor_role.yaml +- nnfnodeecdata_viewer_role.yaml +- nnfnodeblockstorage_editor_role.yaml +- nnfnodeblockstorage_viewer_role.yaml +- nnfnode_editor_role.yaml +- nnfnode_viewer_role.yaml +- nnfdatamovementprofile_editor_role.yaml +- nnfdatamovementprofile_viewer_role.yaml +- nnfdatamovementmanager_editor_role.yaml +- nnfdatamovementmanager_viewer_role.yaml +- nnfdatamovement_editor_role.yaml +- nnfdatamovement_viewer_role.yaml +- nnfcontainerprofile_editor_role.yaml +- nnfcontainerprofile_viewer_role.yaml +- nnfaccess_editor_role.yaml +- nnfaccess_viewer_role.yaml - nnfsystemstorage_editor_role.yaml - nnfsystemstorage_viewer_role.yaml - nnflustremgt_editor_role.yaml diff --git a/config/rbac/nnfdatamovement_viewer_role.yaml b/config/rbac/nnfdatamovement_viewer_role.yaml new file mode 100644 index 000000000..78a6bcc86 --- /dev/null +++ b/config/rbac/nnfdatamovement_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view nnfdatamovements. 
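`config/rbac/kustomization.yaml` now lists an editor and a viewer ClusterRole for every NNF kind, defined in the files that follow. As the scaffold comment notes, these are helpers for cluster admins, not permissions the operator itself uses: the controller's own role is generated by controller-gen from kubebuilder RBAC markers on the reconcilers, along the lines of this sketch (reconciler name and verb choices are illustrative, not copied from the repository):

```go
// Sketch: kubebuilder RBAC markers of the kind that generate the controller's
// own role (config/rbac/role.yaml). Reconciler and verbs are illustrative.
package controller

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
)

type NnfNodeBlockStorageReconciler struct{}

//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodeblockstorages,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodeblockstorages/status,verbs=get;update;patch
func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// The markers above are consumed by controller-gen; the reconcile body is
	// irrelevant to RBAC generation.
	return ctrl.Result{}, nil
}
```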
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfdatamovement-viewer-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfdatamovements + verbs: + - get + - list + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfdatamovements/status + verbs: + - get diff --git a/config/rbac/nnfnode_editor_role.yaml b/config/rbac/nnfnode_editor_role.yaml new file mode 100644 index 000000000..40b140926 --- /dev/null +++ b/config/rbac/nnfnode_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit nnfnodes. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnode-editor-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodes/status + verbs: + - get diff --git a/config/rbac/nnfnode_viewer_role.yaml b/config/rbac/nnfnode_viewer_role.yaml new file mode 100644 index 000000000..6594defa6 --- /dev/null +++ b/config/rbac/nnfnode_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view nnfnodes. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnode-viewer-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodes + verbs: + - get + - list + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodes/status + verbs: + - get diff --git a/config/rbac/nnfnodeblockstorage_editor_role.yaml b/config/rbac/nnfnodeblockstorage_editor_role.yaml new file mode 100644 index 000000000..ccf86087b --- /dev/null +++ b/config/rbac/nnfnodeblockstorage_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit nnfnodeblockstorages. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodeblockstorage-editor-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodeblockstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodeblockstorages/status + verbs: + - get diff --git a/config/rbac/nnfnodeblockstorage_viewer_role.yaml b/config/rbac/nnfnodeblockstorage_viewer_role.yaml new file mode 100644 index 000000000..4f0d19766 --- /dev/null +++ b/config/rbac/nnfnodeblockstorage_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view nnfnodeblockstorages. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodeblockstorage-viewer-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodeblockstorages + verbs: + - get + - list + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodeblockstorages/status + verbs: + - get diff --git a/config/rbac/nnfnodestorage_editor_role.yaml b/config/rbac/nnfnodestorage_editor_role.yaml new file mode 100644 index 000000000..bc07b1a90 --- /dev/null +++ b/config/rbac/nnfnodestorage_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit nnfnodestorages. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodestorage-editor-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodestorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodestorages/status + verbs: + - get diff --git a/config/rbac/nnfnodestorage_viewer_role.yaml b/config/rbac/nnfnodestorage_viewer_role.yaml new file mode 100644 index 000000000..18c27b601 --- /dev/null +++ b/config/rbac/nnfnodestorage_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view nnfnodestorages. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodestorage-viewer-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodestorages + verbs: + - get + - list + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodestorages/status + verbs: + - get diff --git a/config/rbac/nnfstorage_editor_role.yaml b/config/rbac/nnfstorage_editor_role.yaml new file mode 100644 index 000000000..0588055b8 --- /dev/null +++ b/config/rbac/nnfstorage_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit nnfstorages. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfstorage-editor-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfstorages/status + verbs: + - get diff --git a/config/rbac/nnfstorage_viewer_role.yaml b/config/rbac/nnfstorage_viewer_role.yaml new file mode 100644 index 000000000..20f12b891 --- /dev/null +++ b/config/rbac/nnfstorage_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view nnfstorages. 
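The editor roles grant full CRUD verbs on each resource, while the viewer roles, like the one defined just below, grant only `get`, `list`, and `watch`, plus `get` on the status subresource. Either kind takes effect only once bound to a subject; a hypothetical binding for the viewer role, sketched with the rbac/v1 Go types:

```go
// Sketch: binding the scaffolded viewer role to a user. The subject name is
// hypothetical; in practice such a binding is usually applied as YAML.
package main

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func nnfStorageViewerBinding() *rbacv1.ClusterRoleBinding {
	return &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "nnfstorage-viewers"},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     "nnfstorage-viewer-role", // the role defined just below
		},
		Subjects: []rbacv1.Subject{{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "User",
			Name:     "storage-admin@example.com", // hypothetical subject
		}},
	}
}
```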
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfstorage-viewer-role +rules: +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfstorages + verbs: + - get + - list + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfstorages/status + verbs: + - get diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index bfffd9415..3c27ed5c3 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -12,4 +12,18 @@ resources: - nnf_v1alpha1_nnflustremgt.yaml - nnf_v1alpha1_nnfdatamovementprofile.yaml - nnf_v1alpha1_nnfsystemstorage.yaml +- nnf_v1alpha2_nnfaccess.yaml +- nnf_v1alpha2_nnfcontainerprofile.yaml +- nnf_v1alpha2_nnfdatamovement.yaml +- nnf_v1alpha2_nnfdatamovementmanager.yaml +- nnf_v1alpha2_nnfdatamovementprofile.yaml +- nnf_v1alpha2_nnflustremgt.yaml +- nnf_v1alpha2_nnfnode.yaml +- nnf_v1alpha2_nnfnodeblockstorage.yaml +- nnf_v1alpha2_nnfnodeecdata.yaml +- nnf_v1alpha2_nnfnodestorage.yaml +- nnf_v1alpha2_nnfportmanager.yaml +- nnf_v1alpha2_nnfstorage.yaml +- nnf_v1alpha2_nnfstorageprofile.yaml +- nnf_v1alpha2_nnfsystemstorage.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/nnf_v1alpha2_nnfaccess.yaml b/config/samples/nnf_v1alpha2_nnfaccess.yaml new file mode 100644 index 000000000..aee9357f7 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfaccess.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfAccess +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfaccess-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfcontainerprofile.yaml b/config/samples/nnf_v1alpha2_nnfcontainerprofile.yaml new file mode 100644 index 000000000..e51d9658a --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfcontainerprofile.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfContainerProfile +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfcontainerprofile-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfdatamovement.yaml b/config/samples/nnf_v1alpha2_nnfdatamovement.yaml new file mode 100644 index 000000000..21708bbdd --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfdatamovement.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfDataMovement +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfdatamovement-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfdatamovementmanager.yaml b/config/samples/nnf_v1alpha2_nnfdatamovementmanager.yaml new file mode 100644 index 000000000..7afe3e9b7 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfdatamovementmanager.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfDataMovementManager +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfdatamovementmanager-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfdatamovementprofile.yaml b/config/samples/nnf_v1alpha2_nnfdatamovementprofile.yaml new file mode 100644 index 000000000..abd3083de --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfdatamovementprofile.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfDataMovementProfile 
+metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfdatamovementprofile-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnflustremgt.yaml b/config/samples/nnf_v1alpha2_nnflustremgt.yaml new file mode 100644 index 000000000..f76174693 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnflustremgt.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfLustreMGT +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnflustremgt-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfnode.yaml b/config/samples/nnf_v1alpha2_nnfnode.yaml new file mode 100644 index 000000000..5fef744c4 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfnode.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfNode +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnode-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfnodeblockstorage.yaml b/config/samples/nnf_v1alpha2_nnfnodeblockstorage.yaml new file mode 100644 index 000000000..d3ac3e190 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfnodeblockstorage.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfNodeBlockStorage +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodeblockstorage-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfnodeecdata.yaml b/config/samples/nnf_v1alpha2_nnfnodeecdata.yaml new file mode 100644 index 000000000..2a50eb9b5 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfnodeecdata.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfNodeECData +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodeecdata-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfnodestorage.yaml b/config/samples/nnf_v1alpha2_nnfnodestorage.yaml new file mode 100644 index 000000000..5ca73210f --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfnodestorage.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfNodeStorage +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodestorage-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfportmanager.yaml b/config/samples/nnf_v1alpha2_nnfportmanager.yaml new file mode 100644 index 000000000..888ffbd01 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfportmanager.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfPortManager +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfportmanager-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfstorage.yaml b/config/samples/nnf_v1alpha2_nnfstorage.yaml new file mode 100644 index 000000000..a1ea9eebb --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfstorage.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfStorage +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfstorage-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfstorageprofile.yaml b/config/samples/nnf_v1alpha2_nnfstorageprofile.yaml new file mode 
100644 index 000000000..6a2aa6065 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfstorageprofile.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfStorageProfile +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfstorageprofile-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha2_nnfsystemstorage.yaml b/config/samples/nnf_v1alpha2_nnfsystemstorage.yaml new file mode 100644 index 000000000..b1b077887 --- /dev/null +++ b/config/samples/nnf_v1alpha2_nnfsystemstorage.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha2 +kind: NnfSystemStorage +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfsystemstorage-sample +spec: + # TODO(user): Add fields here diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index e4a116931..a3bdcc183 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -10,14 +10,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-nnf-cray-hpe-com-v1alpha1-nnfcontainerprofile + path: /validate-nnf-cray-hpe-com-v1alpha2-nnfcontainerprofile failurePolicy: Fail name: vnnfcontainerprofile.kb.io rules: - apiGroups: - nnf.cray.hpe.com apiVersions: - - v1alpha1 + - v1alpha2 operations: - CREATE - UPDATE @@ -30,14 +30,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-nnf-cray-hpe-com-v1alpha1-nnfdatamovementprofile + path: /validate-nnf-cray-hpe-com-v1alpha2-nnfdatamovementprofile failurePolicy: Fail name: vnnfdatamovementprofile.kb.io rules: - apiGroups: - nnf.cray.hpe.com apiVersions: - - v1alpha1 + - v1alpha2 operations: - CREATE - UPDATE @@ -50,14 +50,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-nnf-cray-hpe-com-v1alpha1-nnfstorageprofile + path: /validate-nnf-cray-hpe-com-v1alpha2-nnfstorageprofile failurePolicy: Fail name: vnnfstorageprofile.kb.io rules: - apiGroups: - nnf.cray.hpe.com apiVersions: - - v1alpha1 + - v1alpha2 operations: - CREATE - UPDATE diff --git a/crd-bumper.yaml b/crd-bumper.yaml new file mode 100644 index 000000000..404f07f22 --- /dev/null +++ b/crd-bumper.yaml @@ -0,0 +1,10 @@ +# A comma-separated list of directories where more Go code can be found, beyond +# the usual cmd/, api/, internal/ that kubebuilder would put in place. The Go +# files in these dirs will be bumped to the new hub version. +extra_go_dirs: mount-daemon + +# A comma-separated list of directories of Kustomize config files that have +# references to the API and that must be updated to the new hub version so +# that ArgoCD can sync them. +extra_config_dirs: config/examples,config/ports,config/dws + diff --git a/github/cluster-api/util/conversion/conversion_test.go b/github/cluster-api/util/conversion/conversion_test.go index 9590bedee..e8160722d 100644 --- a/github/cluster-api/util/conversion/conversion_test.go +++ b/github/cluster-api/util/conversion/conversion_test.go @@ -19,27 +19,1634 @@ package conversion import ( "testing" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" . "github.com/onsi/ginkgo/v2" . 
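The generated validating-webhook paths and apiVersions in `config/webhook/manifests.yaml` move from v1alpha1 to v1alpha2 in lockstep, since kubebuilder derives the URL path from the group and version. The remainder of the diff is `conversion_test.go`, which fills in the previously stubbed tests for the cluster-api-derived annotation mechanism used alongside hub-and-spoke conversion: `MarshalData` serializes a source object into the `DataAnnotation` annotation on the destination, so hub-only fields survive while an object is stored at a spoke version, and `UnmarshalData` restores that data and strips the annotation. How the two calls typically sit inside the conversion hooks, extending the earlier sketch (`utilconversion` is an assumed import alias for `github/cluster-api/util/conversion`; field copies are elided):

```go
// Sketch: how MarshalData/UnmarshalData fit into ConvertTo/ConvertFrom,
// following the cluster-api pattern these tests exercise.
func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error {
	dst := dstRaw.(*nnfv1alpha2.NnfAccess)
	// ... copy spoke (v1alpha1) fields into the hub ...

	// Restore hub-only fields stashed in the annotation by an earlier
	// down-conversion, if any.
	restored := &nnfv1alpha2.NnfAccess{}
	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
		return err
	}
	// ... copy hub-only fields from restored into dst ...
	return nil
}

func (dst *NnfAccess) ConvertFrom(srcRaw conversion.Hub) error {
	src := srcRaw.(*nnfv1alpha2.NnfAccess)
	// ... copy hub fields into the spoke ...

	// Stash the full hub object in DataAnnotation so nothing is lost while
	// the object is stored at the spoke version.
	return utilconversion.MarshalData(src, dst)
}
```

The per-kind `v1old` GVK variables below exist only to give the tests a distinct spoke version to marshal against.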
"github.com/onsi/gomega" - //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - //"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - //"k8s.io/apimachinery/pkg/runtime/schema" - //nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" ) var ( + oldNnfAccessGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfAccess", + } + + oldNnfContainerProfileGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfContainerProfile", + } + + oldNnfDataMovementGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfDataMovement", + } + + oldNnfDataMovementManagerGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfDataMovementManager", + } + + oldNnfDataMovementProfileGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfDataMovementProfile", + } + + oldNnfLustreMGTGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfLustreMGT", + } + + oldNnfNodeGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfNode", + } + + oldNnfNodeBlockStorageGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfNodeBlockStorage", + } + + oldNnfNodeECDataGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfNodeECData", + } + + oldNnfNodeStorageGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfNodeStorage", + } + + oldNnfPortManagerGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfPortManager", + } + + oldNnfStorageGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfStorage", + } + + oldNnfStorageProfileGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfStorageProfile", + } + + oldNnfSystemStorageGVK = schema.GroupVersionKind{ + Group: nnfv1alpha2.GroupVersion.Group, + Version: "v1old", + Kind: "NnfSystemStorage", + } // +crdbumper:scaffold:gvk ) func TestMarshalData(t *testing.T) { - _ = NewWithT(t) + g := NewWithT(t) + + t.Run("NnfAccess should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfAccess{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfAccessSpec{ + DesiredState: "mounted", + UserID: 1551, + GroupID: 2442, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfAccessGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("mounted")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("2442")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("1551")) + }) + + t.Run("NnfAccess should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfAccess{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfAccess")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfContainerProfile should write source object to destination", func(*testing.T) { + prerun := int64(345) + userid := uint32(7667) + groupid := uint32(8448) + src := &nnfv1alpha2.NnfContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Data: nnfv1alpha2.NnfContainerProfileData{ + PreRunTimeoutSeconds: &prerun, + UserID: &userid, + GroupID: &groupid, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfContainerProfileGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("345")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("7667")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("8448")) + }) + + t.Run("NnfContainerProfile should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfContainerProfile")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfDataMovement should write source object to destination", func(*testing.T) { + destpath := &nnfv1alpha2.NnfDataMovementSpecSourceDestination{ + Path: "little/red", + } + srcpath := &nnfv1alpha2.NnfDataMovementSpecSourceDestination{ + Path: "/dev/null", + } + src := &nnfv1alpha2.NnfDataMovement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfDataMovementSpec{ + Destination: destpath, + Source: srcpath, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfDataMovementGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("little/red")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("/dev/null")) + }) + + t.Run("NnfDataMovement should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfDataMovement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfDataMovement")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfDataMovementManager should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfDataMovementManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: 
map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfDataMovementManagerSpec{ + HostPath: "/this/dir", + MountPath: "/mnts", + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfDataMovementManagerGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("/this/dir")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("/mnts")) + }) + + t.Run("NnfDataMovementManager should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfDataMovementManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfDataMovementManager")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfDataMovementProfile should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfDataMovementProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Data: nnfv1alpha2.NnfDataMovementProfileData{ + Command: "mpirun is cool", + StatCommand: "stat --something", + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfDataMovementProfileGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("mpirun is cool")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("stat --something")) + }) + + t.Run("NnfDataMovementProfile should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfDataMovementProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfDataMovementProfile")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfLustreMGT should write source object to destination", func(*testing.T) { + blacklist := []string{"black-fly", "black bird"} + src := &nnfv1alpha2.NnfLustreMGT{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfLustreMGTSpec{ + FsNameStart: "aaaa-pizza", + FsNameBlackList: blacklist, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfLustreMGTGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("aaaa-pizza")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("black-fly")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("black bird")) + }) + + t.Run("NnfLustreMGT should append the 
annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfLustreMGT{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfLustreMGT")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfNode should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfNodeSpec{ + Name: "rabbit-1", + Pod: "nnf-thingy-122", + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfNodeGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("rabbit-1")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("nnf-thingy-122")) + }) + + t.Run("NnfNode should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfNode")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfNodeBlockStorage should write source object to destination", func(*testing.T) { + alloc := []nnfv1alpha2.NnfNodeBlockStorageAllocationSpec{ + {Access: []string{"rabbit-44", "rabbit-10002"}}, + } + src := &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfNodeBlockStorageSpec{ + Allocations: alloc, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfNodeBlockStorageGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("rabbit-44")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("rabbit-10002")) + }) + + t.Run("NnfNodeBlockStorage should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfNodeBlockStorage")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfNodeECData should write source object to destination", func(*testing.T) { + elem1 := nnfv1alpha2.NnfNodeECPrivateData{"element1": "the world"} + priv := map[string]nnfv1alpha2.NnfNodeECPrivateData{ + "thing1": elem1, + } + src := &nnfv1alpha2.NnfNodeECData{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Status: nnfv1alpha2.NnfNodeECDataStatus{ + Data: 
priv, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfNodeECDataGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("thing1")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("element1")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("the world")) + }) + + t.Run("NnfNodeECData should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfNodeECData{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfNodeECData")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfNodeStorage should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfNodeStorageSpec{ + UserID: 4997, + GroupID: 2112, + FileSystemType: "gfs2", + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfNodeStorageGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("4997")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("2112")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("gfs2")) + }) + + t.Run("NnfNodeStorage should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfNodeStorage")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfPortManager should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfPortManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfPortManagerSpec{ + SystemConfiguration: corev1.ObjectReference{ + Namespace: "willy-wonka", + Name: "candy-land", + }, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfPortManagerGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("willy-wonka")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("candy-land")) + }) + + t.Run("NnfPortManager should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfPortManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + 
dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfPortManager")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfStorage should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfStorageSpec{ + FileSystemType: "gfs2", + UserID: 4004, + GroupID: 2992, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfStorageGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("gfs2")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("4004")) + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("2992")) + }) + + t.Run("NnfStorage should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfStorage")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfStorageProfile should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfStorageProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Data: nnfv1alpha2.NnfStorageProfileData{ + LustreStorage: nnfv1alpha2.NnfStorageProfileLustreData{ + ExternalMGS: "kfi@1:this@that", + }, + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfStorageProfileGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("kfi@1:this@that")) + }) + + t.Run("NnfStorageProfile should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfStorageProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfStorageProfile")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) + + t.Run("NnfSystemStorage should write source object to destination", func(*testing.T) { + src := &nnfv1alpha2.NnfSystemStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Labels: map[string]string{ + "label1": "", + }, + }, + Spec: nnfv1alpha2.NnfSystemStorageSpec{ + ClientMountPath: "/on/this", + }, + } + + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfSystemStorageGVK) + dst.SetName("test-1") + + g.Expect(MarshalData(src, dst)).To(Succeed()) + // ensure the src object is not modified + g.Expect(src.GetLabels()).ToNot(BeEmpty()) + + 
g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(BeEmpty()) + + g.Expect(dst.GetAnnotations()[DataAnnotation]).To(ContainSubstring("/on/this")) + }) + + t.Run("NnfSystemStorage should append the annotation", func(*testing.T) { + src := &nnfv1alpha2.NnfSystemStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(nnfv1alpha2.GroupVersion.WithKind("NnfSystemStorage")) + dst.SetName("test-1") + dst.SetAnnotations(map[string]string{ + "annotation": "1", + }) + + g.Expect(MarshalData(src, dst)).To(Succeed()) + g.Expect(dst.GetAnnotations()).To(HaveLen(2)) + }) // +crdbumper:scaffold:marshaldata } func TestUnmarshalData(t *testing.T) { - _ = NewWithT(t) + g := NewWithT(t) + + t.Run("NnfAccess should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfAccess{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfAccessGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfAccess should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfAccessGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfAccess{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfAccess should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfAccessGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfAccess{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfContainerProfile should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfContainerProfileGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfContainerProfile should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfContainerProfileGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + 
dst := &nnfv1alpha2.NnfContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfContainerProfile should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfContainerProfileGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfDataMovement should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfDataMovement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfDataMovementGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfDataMovement should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfDataMovementGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfDataMovement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfDataMovement should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfDataMovementGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfDataMovement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfDataMovementManager should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfDataMovementManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfDataMovementManagerGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + 
g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfDataMovementManager should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfDataMovementManagerGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfDataMovementManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfDataMovementManager should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfDataMovementManagerGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfDataMovementManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfDataMovementProfile should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfDataMovementProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfDataMovementProfileGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfDataMovementProfile should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfDataMovementProfileGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfDataMovementProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfDataMovementProfile should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfDataMovementProfileGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfDataMovementProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + 
g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfLustreMGT should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfLustreMGT{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfLustreMGTGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfLustreMGT should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfLustreMGTGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfLustreMGT{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfLustreMGT should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfLustreMGTGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfLustreMGT{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfNode should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfNodeGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfNode should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfNodeGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfNode should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfNodeGVK) + src.SetName("test-1") + 
src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfNodeBlockStorage should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfNodeBlockStorageGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfNodeBlockStorage should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfNodeBlockStorageGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfNodeBlockStorage should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfNodeBlockStorageGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfNodeECData should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfNodeECData{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfNodeECDataGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfNodeECData should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfNodeECDataGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfNodeECData{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + 
g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfNodeECData should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfNodeECDataGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfNodeECData{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfNodeStorage should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfNodeStorageGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfNodeStorage should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfNodeStorageGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfNodeStorage should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfNodeStorageGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfPortManager should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfPortManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfPortManagerGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfPortManager should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + 
src.SetGroupVersionKind(oldNnfPortManagerGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfPortManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfPortManager should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfPortManagerGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfPortManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfStorage should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfStorageGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfStorage should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfStorageGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfStorage should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfStorageGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfStorageProfile should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfStorageProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfStorageProfileGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfStorageProfile should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfStorageProfileGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfStorageProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfStorageProfile should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfStorageProfileGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfStorageProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) + + t.Run("NnfSystemStorage should return false without errors if annotation doesn't exist", func(*testing.T) { + src := &nnfv1alpha2.NnfSystemStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + dst := &unstructured.Unstructured{} + dst.SetGroupVersionKind(oldNnfSystemStorageGVK) + dst.SetName("test-1") + + ok, err := UnmarshalData(src, dst) + g.Expect(ok).To(BeFalse()) + g.Expect(err).ToNot(HaveOccurred()) + }) + + t.Run("NnfSystemStorage should return true when a valid annotation with data exists", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfSystemStorageGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfSystemStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(dst.GetLabels()).To(HaveLen(1)) + g.Expect(dst.GetName()).To(Equal("test-1")) + g.Expect(dst.GetLabels()).To(HaveKeyWithValue("label1", "")) + g.Expect(dst.GetAnnotations()).To(BeEmpty()) + }) + + t.Run("NnfSystemStorage should clean the annotation on successful unmarshal", func(*testing.T) { + src := &unstructured.Unstructured{} + src.SetGroupVersionKind(oldNnfSystemStorageGVK) + src.SetName("test-1") + src.SetAnnotations(map[string]string{ + "annotation-1": "", + DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", + }) + + dst := &nnfv1alpha2.NnfSystemStorage{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-1", + }, + } + + ok, err := UnmarshalData(src, dst) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + g.Expect(src.GetAnnotations()).ToNot(HaveKey(DataAnnotation)) + g.Expect(src.GetAnnotations()).To(HaveLen(1)) + }) // +crdbumper:scaffold:unmarshaldata } diff --git a/go.mod b/go.mod index e56710024..21b1dcee2 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/NearNodeFlash/nnf-sos go 1.21 require ( - github.com/DataWorkflowServices/dws v0.0.1-0.20240820212105-5950825b3d74 - github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240820214524-99d5da17471d - github.com/NearNodeFlash/nnf-ec v0.0.1-0.20240820195316-cb407b151cb4 + github.com/DataWorkflowServices/dws v0.0.1-0.20240913193141-737bcd946a02 + github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240913195900-b3285e54755e + github.com/NearNodeFlash/nnf-ec v0.0.1-0.20240912200758-f862bc773739 github.com/ghodss/yaml v1.0.0 github.com/go-logr/logr v1.4.1 github.com/google/go-cmp v0.6.0 diff --git a/go.sum b/go.sum index d85bcdb22..41c4731d1 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataWorkflowServices/dws v0.0.1-0.20240820212105-5950825b3d74 h1:u0ZqL8mEyKoIOo7ahBTQSjcmdu4O2rzDCO+h9oJ2/O8= -github.com/DataWorkflowServices/dws v0.0.1-0.20240820212105-5950825b3d74/go.mod h1:6MrEEHISskyooSKcKU6R3mFqH6Yh6KzWgajhcw2s+nM= +github.com/DataWorkflowServices/dws v0.0.1-0.20240913193141-737bcd946a02 h1:ZidnleTEjdZX6geLAWTw3NMEsO8YLEQ9ubPx1fKGDeg= +github.com/DataWorkflowServices/dws v0.0.1-0.20240913193141-737bcd946a02/go.mod h1:6MrEEHISskyooSKcKU6R3mFqH6Yh6KzWgajhcw2s+nM= github.com/HewlettPackard/structex v1.0.4 h1:RVTdN5FWhDWr1IkjllU8wxuLjISo4gr6u5ryZpzyHcA= github.com/HewlettPackard/structex v1.0.4/go.mod h1:3frC4RY/cPsP/4+N8rkxsNAGlQwHV+zDC7qvrN+N+rE= -github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240820214524-99d5da17471d h1:iiE1EhefucdPBJwYfRRGIqDXnBXTV9tfgn+trJc7RSg= -github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240820214524-99d5da17471d/go.mod h1:N5X1obpl0mBI0VoCJdQhv7cFXOC6g3VlXj712qWj0JE= -github.com/NearNodeFlash/nnf-ec v0.0.1-0.20240820195316-cb407b151cb4 h1:cxUkRTnynEvCLbYL+d/FVyITLODO/goscivJLq5kipY= -github.com/NearNodeFlash/nnf-ec v0.0.1-0.20240820195316-cb407b151cb4/go.mod h1:oxdwMqfttOF9dabJhqrWlirCnMk8/8eyLMwl+hducjk= +github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240913195900-b3285e54755e h1:fvJHwXbEDss0m642zVe3Hra0y/zh49Vnhqk27I3HrRc= +github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240913195900-b3285e54755e/go.mod h1:VP4+66Hv5XnaJgzaQ0fooB4r4141aM5saoo8nCZQTGA= +github.com/NearNodeFlash/nnf-ec v0.0.1-0.20240912200758-f862bc773739 h1:T49ixk8TBeiYQ3CPpx463GL2gKk94dD9nXpoLBfdmqg= +github.com/NearNodeFlash/nnf-ec v0.0.1-0.20240912200758-f862bc773739/go.mod h1:oxdwMqfttOF9dabJhqrWlirCnMk8/8eyLMwl+hducjk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= diff --git a/hack/prefix-webhook-names.sh b/hack/prefix-webhook-names.sh new file mode 100755 index 000000000..ba2027120 --- /dev/null +++ b/hack/prefix-webhook-names.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Copyright 2022-2024 Hewlett Packard 
Enterprise Development LP +# Other additional copyright holders may be indicated within. +# +# The entirety of this work is licensed under the Apache License, +# Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. +# +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# To allow suite_test.go to run webhooks from multiple repos, we have to +# adjust the webhook names. They were meant to be processed by kustomize, +# which would have prepended "namePrefix" to the names, but envtest doesn't +# use that. This tool copies the webhook config and adds the name prefix. + +SOURCE_DIR=$1 +DEST_DIR=$2 +GROUP=$3 + +mkdir -p "$DEST_DIR" +cp "$SOURCE_DIR"/* "$DEST_DIR" +sed -i.bak -e "s/validating-webhook-configuration/$GROUP-validating-webhook-configuration/" -e "s/mutating-webhook-configuration/$GROUP-mutating-webhook-configuration/" "$DEST_DIR/manifests.yaml" +rm "$DEST_DIR/manifests.yaml.bak" + +sed -i.bak -e "s/webhook-service/$GROUP-webhook-service/" "$DEST_DIR/service.yaml" +rm "$DEST_DIR/service.yaml.bak" + +if [[ -f "$DEST_DIR/service_account.yaml" ]]; then + sed -i.bak -e "s/webhook/$GROUP-webhook/" "$DEST_DIR/service_account.yaml" + rm "$DEST_DIR/service_account.yaml.bak" +fi + +exit 0 diff --git a/internal/controller/conversion_test.go b/internal/controller/conversion_test.go new file mode 100644 index 000000000..0e6055889 --- /dev/null +++ b/internal/controller/conversion_test.go @@ -0,0 +1,735 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controller + +import ( + "context" + + "github.com/google/uuid" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" + utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" +) + +var _ = Describe("Conversion Webhook Test", func() { + + // Don't get deep into verifying the conversion. + // We have api//conversion_test.go that is digging deep. + // We're just verifying that the conversion webhook is hooked up. + + // Note: if a resource is accessed by its spoke API, then it should + // have the utilconversion.DataAnnotation annotation. It will not + // have that annotation when it is accessed by its hub API.
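The battery of UnmarshalData subtests above, and the note here about spoke vs. hub reads, all revolve around one helper pair vendored under github/cluster-api/util/conversion. As a minimal sketch of that contract (the metadata-preservation details of the real helpers are omitted, and the annotation key shown is illustrative): MarshalData stashes a JSON snapshot of the hub object in the spoke's annotations, and UnmarshalData restores the snapshot and strips the annotation.

```go
package conversion

import (
	"encoding/json"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Illustrative key; the real one is defined in the vendored
// github/cluster-api/util/conversion package.
const DataAnnotation = "nnf.cray.hpe.com/conversion-data"

// MarshalData saves a JSON snapshot of the hub object src into the
// annotations of the spoke object dst, so hub-only fields survive a
// hub -> spoke -> hub round trip.
func MarshalData(src client.Object, dst metav1.Object) error {
	data, err := json.Marshal(src)
	if err != nil {
		return err
	}
	annos := dst.GetAnnotations()
	if annos == nil {
		annos = map[string]string{}
	}
	annos[DataAnnotation] = string(data)
	dst.SetAnnotations(annos)
	return nil
}

// UnmarshalData restores the snapshot from src's annotations into dst.
// It reports false with no error when the annotation is absent, and it
// removes the annotation from src on success: exactly the behaviors the
// subtests above assert.
func UnmarshalData(src metav1.Object, dst client.Object) (bool, error) {
	annos := src.GetAnnotations()
	data, ok := annos[DataAnnotation]
	if !ok {
		return false, nil
	}
	if err := json.Unmarshal([]byte(data), dst); err != nil {
		return false, err
	}
	delete(annos, DataAnnotation)
	src.SetAnnotations(annos)
	return true, nil
}
```

This is also why, in the conversion webhook tests that follow, a read through the spoke API carries exactly one annotation while a read through the hub API carries none.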
+ + Context("NnfAccess", func() { + var resHub *nnfv1alpha2.NnfAccess + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfAccess{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfAccessSpec{ + DesiredState: "mounted", + TeardownState: "Teardown", + Target: "all", + UserID: 1001, + GroupID: 2002, + }, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfAccess{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfAccess resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfAccess{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfAccess" + }) + + Context("NnfContainerProfile", func() { + var resHub *nnfv1alpha2.NnfContainerProfile + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Data: nnfv1alpha2.NnfContainerProfileData{ + Spec: &corev1.PodSpec{ + NodeName: "rabbit-1", + Containers: []corev1.Container{{Name: "one"}}, + }, + }, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfContainerProfile{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfContainerProfile resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfContainerProfile{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. 
+ Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfContainerProfile" + }) + + Context("NnfDataMovement", func() { + var resHub *nnfv1alpha2.NnfDataMovement + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfDataMovement{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfDataMovementSpec{}, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfDataMovement{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfDataMovement resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfDataMovement{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfDataMovement" + }) + + Context("NnfDataMovementManager", func() { + var resHub *nnfv1alpha2.NnfDataMovementManager + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfDataMovementManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfDataMovementManagerSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "dm-worker-dummy", + Image: "nginx", + }}, + }, + }, + }, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfDataMovementManager{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfDataMovementManager resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfDataMovementManager{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. 
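Every AfterEach in this file uses the same delete-then-poll idiom, because the controller-runtime client reads through an informer cache and a Get issued immediately after Delete can still return the object. A self-contained sketch, with a hypothetical helper name:

```go
package controller

import (
	"context"

	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitGone deletes obj and then polls until the cache agrees it is gone.
// The helper name is illustrative; the suite inlines this pattern.
func waitGone(ctx context.Context, c client.Client, obj client.Object) {
	Expect(c.Delete(ctx, obj)).To(Succeed())
	Eventually(func() error {
		// Keep re-reading until Get fails with NotFound.
		return c.Get(ctx, client.ObjectKeyFromObject(obj), obj)
	}).ShouldNot(Succeed())
}
```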
+ Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfDataMovementManager" + }) + + Context("NnfDataMovementProfile", func() { + var resHub *nnfv1alpha2.NnfDataMovementProfile + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfDataMovementProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Data: nnfv1alpha2.NnfDataMovementProfileData{}, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfDataMovementProfile{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfDataMovementProfile resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfDataMovementProfile{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfDataMovementProfile" + }) + + Context("NnfLustreMGT", func() { + var resHub *nnfv1alpha2.NnfLustreMGT + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfLustreMGT{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfLustreMGTSpec{ + Addresses: []string{"rabbit-1@tcp", "rabbit-2@tcp"}, + }, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfLustreMGT{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfLustreMGT resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfLustreMGT{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. 
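For each of these Contexts to exercise a real conversion, the kind's conversion endpoint has to be served. In the usual kubebuilder layout, assumed here, the hub version carries an empty Hub() marker and registers itself with the manager; controller-runtime then serves the conversion webhook for any kind whose scheme contains a Hub/Convertible pair:

```go
package v1alpha2

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

// Hub marks v1alpha2 as the storage ("hub") version that all spokes
// convert to and from.
func (*NnfLustreMGT) Hub() {}

// SetupWebhookWithManager registers the webhook endpoint with the
// manager; no hand-written conversion handler is needed.
func (r *NnfLustreMGT) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(r).
		Complete()
}
```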
+ Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfLustreMGT" + }) + + Context("NnfNode", func() { + var resHub *nnfv1alpha2.NnfNode + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfNodeSpec{ + State: "Enable", + }, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfNode{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfNode resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfNode{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfNode" + }) + + Context("NnfNodeBlockStorage", func() { + var resHub *nnfv1alpha2.NnfNodeBlockStorage + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfNodeBlockStorageSpec{}, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfNodeBlockStorage{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfNodeBlockStorage resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfNodeBlockStorage{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. 
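None of these webhooks would run under envtest without the prefixed configurations produced by hack/prefix-webhook-names.sh. A sketch of how a suite could consume the colon-separated WEBHOOK_DIRS value exported by the Makefile's test target; the exact wiring inside suite_test.go is an assumption:

```go
package controller

import (
	"os"
	"strings"

	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

// newTestEnv builds an envtest environment that loads every webhook
// manifest directory listed in WEBHOOK_DIRS (nnf, lustre, and dws), so
// all three operators' webhooks coexist in one test API server.
func newTestEnv() *envtest.Environment {
	return &envtest.Environment{
		WebhookInstallOptions: envtest.WebhookInstallOptions{
			Paths: strings.Split(os.Getenv("WEBHOOK_DIRS"), ":"),
		},
	}
}
```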
+ Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfNodeBlockStorage" + }) + + Context("NnfNodeECData", func() { + var resHub *nnfv1alpha2.NnfNodeECData + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfNodeECData{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfNodeECDataSpec{}, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfNodeECData{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfNodeECData resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfNodeECData{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfNodeECData" + }) + + Context("NnfNodeStorage", func() { + var resHub *nnfv1alpha2.NnfNodeStorage + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfNodeStorageSpec{}, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfNodeStorage{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfNodeStorage resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfNodeStorage{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. 
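Fetching the same object as both nnfv1alpha1 and nnfv1alpha2 types, as these Its do, only works when both versions are registered in the client's scheme. A sketch of the registration a suite needs; kubebuilder generates the AddToScheme helpers in each version's groupversion_info.go:

```go
package controller

import (
	"k8s.io/apimachinery/pkg/runtime"

	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
)

// buildScheme registers both API versions. With both present, a Get for
// the non-storage version makes the API server call the conversion
// webhook, which is the behavior under test.
func buildScheme() (*runtime.Scheme, error) {
	s := runtime.NewScheme()
	if err := nnfv1alpha1.AddToScheme(s); err != nil {
		return nil, err
	}
	if err := nnfv1alpha2.AddToScheme(s); err != nil {
		return nil, err
	}
	return s, nil
}
```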
+ Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfNodeStorage" + }) + + Context("NnfPortManager", func() { + var resHub *nnfv1alpha2.NnfPortManager + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfPortManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfPortManagerSpec{ + Allocations: make([]nnfv1alpha2.NnfPortManagerAllocationSpec, 0), + }, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfPortManager{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfPortManager resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfPortManager{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfPortManager" + }) + + Context("NnfStorage", func() { + var resHub *nnfv1alpha2.NnfStorage + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfStorageSpec{ + AllocationSets: []nnfv1alpha2.NnfStorageAllocationSetSpec{}, + }, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfStorage{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfStorage resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfStorage{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. 
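The assertions in every It block use Gomega's polling form, where the callback receives its own Gomega instance: a failed expectation inside the callback fails only that attempt and Eventually retries it, instead of aborting the spec. Captured as a self-contained helper (the name is illustrative):

```go
package controller

import (
	"context"

	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// expectAnnotation re-reads obj until it appears with the given
// annotation key, tolerating cache lag between Create and Get.
func expectAnnotation(ctx context.Context, c client.Client, obj client.Object, key string) {
	Eventually(func(g Gomega) {
		g.Expect(c.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed())
		g.Expect(obj.GetAnnotations()).To(HaveKey(key))
	}).Should(Succeed())
}
```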
+ Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfStorage" + }) + + Context("NnfStorageProfile", func() { + var resHub *nnfv1alpha2.NnfStorageProfile + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfStorageProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Data: nnfv1alpha2.NnfStorageProfileData{}, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfStorageProfile{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfStorageProfile resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfStorageProfile{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfStorageProfile" + }) + + Context("NnfSystemStorage", func() { + var resHub *nnfv1alpha2.NnfSystemStorage + + BeforeEach(func() { + id := uuid.NewString()[0:8] + resHub = &nnfv1alpha2.NnfSystemStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: id, + Namespace: corev1.NamespaceDefault, + }, + Spec: nnfv1alpha2.NnfSystemStorageSpec{}, + } + + Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) + }) + + AfterEach(func() { + if resHub != nil { + Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) + expected := &nnfv1alpha2.NnfSystemStorage{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) + }).ShouldNot(Succeed()) + } + }) + + It("reads NnfSystemStorage resource via hub and via spoke v1alpha1", func() { + // Spoke should have annotation. + resSpoke := &nnfv1alpha1.NnfSystemStorage{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. 
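These webhook tests deliberately stay shallow; the per-API conversion tests referenced at the top of the file are where field-level fidelity gets verified. A deterministic sketch of the shape such a round-trip check takes (real suites typically fuzz many objects and scrub volatile metadata before comparing; the kind chosen here is arbitrary):

```go
package v1alpha1

import (
	"testing"

	"github.com/google/go-cmp/cmp"

	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
)

// TestRoundTrip converts hub -> spoke -> hub and requires the spec to
// survive unchanged, which is what the DataAnnotation stash guarantees.
func TestRoundTrip(t *testing.T) {
	hub := &nnfv1alpha2.NnfSystemStorage{}
	hub.SetName("test-1")

	spoke := &NnfSystemStorage{}
	if err := spoke.ConvertFrom(hub); err != nil {
		t.Fatal(err)
	}

	back := &nnfv1alpha2.NnfSystemStorage{}
	if err := spoke.ConvertTo(back); err != nil {
		t.Fatal(err)
	}

	if diff := cmp.Diff(hub.Spec, back.Spec); diff != "" {
		t.Errorf("round trip mismatch (-want +got):\n%s", diff)
	}
}
```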
+ Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + + // +crdbumper:scaffold:spoketest="nnf.NnfSystemStorage" + }) + + // +crdbumper:scaffold:webhooksuitetest +}) diff --git a/internal/controller/directivebreakdown_controller.go b/internal/controller/directivebreakdown_controller.go index 56d9afa40..c848cb9ef 100644 --- a/internal/controller/directivebreakdown_controller.go +++ b/internal/controller/directivebreakdown_controller.go @@ -43,7 +43,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/dwdparse" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -624,7 +624,7 @@ func populateStorageAllocationSet(a *dwsv1alpha2.StorageAllocationSet, strategy func (r *DirectiveBreakdownReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ &dwsv1alpha2.ServersList{}, - &nnfv1alpha1.NnfStorageProfileList{}, + &nnfv1alpha2.NnfStorageProfileList{}, &dwsv1alpha2.PersistentStorageInstanceList{}, } @@ -634,6 +634,6 @@ func (r *DirectiveBreakdownReconciler) SetupWithManager(mgr ctrl.Manager) error For(&dwsv1alpha2.DirectiveBreakdown{}). Owns(&dwsv1alpha2.Servers{}). Owns(&dwsv1alpha2.PersistentStorageInstance{}). - Owns(&nnfv1alpha1.NnfStorageProfile{}). + Owns(&nnfv1alpha2.NnfStorageProfile{}). Complete(r) } diff --git a/internal/controller/directivebreakdown_controller_test.go b/internal/controller/directivebreakdown_controller_test.go index 016e6bfee..2747688f9 100644 --- a/internal/controller/directivebreakdown_controller_test.go +++ b/internal/controller/directivebreakdown_controller_test.go @@ -30,12 +30,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) var _ = Describe("DirectiveBreakdown test", func() { var ( - storageProfile *nnfv1alpha1.NnfStorageProfile + storageProfile *nnfv1alpha2.NnfStorageProfile ) BeforeEach(func() { @@ -45,7 +45,7 @@ var _ = Describe("DirectiveBreakdown test", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha1.NnfStorageProfile{} + profExpected := &nnfv1alpha2.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) @@ -81,7 +81,7 @@ var _ = Describe("DirectiveBreakdown test", func() { return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), servers) }).Should(Succeed(), "Create the DWS Servers Resource") - pinnedStorageProfile := &nnfv1alpha1.NnfStorageProfile{ + pinnedStorageProfile := &nnfv1alpha2.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: directiveBreakdown.GetName(), Namespace: directiveBreakdown.GetNamespace(), diff --git a/internal/controller/dws_servers_controller.go b/internal/controller/dws_servers_controller.go index 46296c2b5..86df24a37 100644 --- a/internal/controller/dws_servers_controller.go +++ b/internal/controller/dws_servers_controller.go @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -42,7 +42,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -170,7 +170,7 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * // Get the NnfStorage with the same name/namespace as the servers resource. It may not exist // yet if we're still in proposal phase, or if it was deleted in teardown. - nnfStorage := &nnfv1alpha1.NnfStorage{} + nnfStorage := &nnfv1alpha2.NnfStorage{} if err := r.Get(ctx, types.NamespacedName{Name: servers.Name, Namespace: servers.Namespace}, nnfStorage); err != nil { if apierrors.IsNotFound(err) { return r.statusSetEmpty(ctx, servers) @@ -222,13 +222,13 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * // Loop through the nnfNodeStorages corresponding to each of the Rabbit nodes and find matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) - matchLabels[nnfv1alpha1.AllocationSetLabel] = label + matchLabels[nnfv1alpha2.AllocationSetLabel] = label listOptions := []client.ListOption{ matchLabels, } - nnfNodeBlockStorageList := &nnfv1alpha1.NnfNodeBlockStorageList{} + nnfNodeBlockStorageList := &nnfv1alpha2.NnfNodeBlockStorageList{} if err := r.List(ctx, nnfNodeBlockStorageList, listOptions...); err != nil { return ctrl.Result{}, err } @@ -363,7 +363,7 @@ func (r *DWSServersReconciler) checkDeletedStorage(ctx context.Context, servers log := r.Log.WithValues("Servers", types.NamespacedName{Name: servers.Name, Namespace: servers.Namespace}) // Get the NnfStorage with the same name/namespace as the servers resource - nnfStorage := &nnfv1alpha1.NnfStorage{} + nnfStorage := &nnfv1alpha2.NnfStorage{} if err := r.Get(ctx, types.NamespacedName{Name: servers.Name, Namespace: servers.Namespace}, nnfStorage); err != nil { if apierrors.IsNotFound(err) { log.Info("NnfStorage is deleted") @@ -394,6 +394,6 @@ func (r *DWSServersReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). For(&dwsv1alpha2.Servers{}). - Watches(&nnfv1alpha1.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(nnfStorageServersMapFunc)). 
+ Watches(&nnfv1alpha2.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(nnfStorageServersMapFunc)). Complete(r) } diff --git a/internal/controller/dws_storage_controller.go b/internal/controller/dws_storage_controller.go index 2c4ac58b2..47a1baea1 100644 --- a/internal/controller/dws_storage_controller.go +++ b/internal/controller/dws_storage_controller.go @@ -38,7 +38,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) type DWSStorageReconciler struct { @@ -73,40 +73,28 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, client.IgnoreNotFound(err) } - // Check if the object is being deleted - if !storage.GetDeletionTimestamp().IsZero() { + // Only reconcile this Storage resource if it is marked as Rabbit Storage + labels := storage.GetLabels() + if labels == nil { return ctrl.Result{}, nil } - // Ensure the storage resource is updated with the latest NNF Node resource status - nnfNode := &nnfv1alpha1.NnfNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "nnf-nlc", - Namespace: storage.GetName(), - }, + if storageType := labels[dwsv1alpha2.StorageTypeLabel]; storageType != "Rabbit" { + return ctrl.Result{}, nil } - if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNode), nnfNode); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } + // Create the status updater to update the status section if any changes are made + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.StorageStatus](storage) + defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() - // Now that it is confirmed an NNF Node resource exists for this Storage resource, - // ensure the proper labels are set on the resource - labels := storage.GetLabels() - if labels == nil { - labels = make(map[string]string) + // Check if the object is being deleted + if !storage.GetDeletionTimestamp().IsZero() { + return ctrl.Result{}, nil } - const rabbitStorageType = "Rabbit" - if label, found := labels[dwsv1alpha2.StorageTypeLabel]; !found || label != rabbitStorageType { - labels[dwsv1alpha2.StorageTypeLabel] = rabbitStorageType - storage.SetLabels(labels) - - if err := r.Update(ctx, storage); err != nil { - return ctrl.Result{}, err - } - - return ctrl.Result{Requeue: true}, nil + if storage.Spec.State == dwsv1alpha2.DisabledState { + storage.Status.Status = dwsv1alpha2.DisabledStatus + storage.Status.Message = "Storage node manually disabled" } if storage.Spec.Mode != "Live" { @@ -114,10 +102,17 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } - // Create a new status + // Ensure the storage resource is updated with the latest NNF Node resource status + nnfNode := &nnfv1alpha2.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nnf-nlc", + Namespace: storage.GetName(), + }, + } - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.StorageStatus](storage) - defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() + if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNode), nnfNode); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } storage.Status.Type = dwsv1alpha2.NVMe storage.Status.Capacity = nnfNode.Status.Capacity @@ -160,7 +155,7 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) 
device.Slot = drive.Slot device.Status = drive.Status.ConvertToDWSResourceStatus() - if drive.Status == nnfv1alpha1.ResourceReady { + if drive.Status == nnfv1alpha2.ResourceReady { wearLevel := drive.WearLevel device.Model = drive.Model device.SerialNumber = drive.SerialNumber @@ -176,61 +171,54 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } } - // Handle any state transitions - switch storage.Spec.State { - case dwsv1alpha2.EnabledState: - - // Clear the fence status if the storage resource is enabled from a disabled state - if storage.Status.Status == dwsv1alpha2.DisabledStatus { + // If the Rabbit is disabled we don't have to check the fenced status + if storage.Spec.State == dwsv1alpha2.DisabledState { + return ctrl.Result{}, nil + } - if nnfNode.Status.Fenced { - log.WithValues("fenced", nnfNode.Status.Fenced).Info("resource disabled") - nnfNode.Status.Fenced = false + // Clear the fence status if the storage resource is enabled from a disabled state + if storage.Status.Status == dwsv1alpha2.DisabledStatus { - if err := r.Status().Update(ctx, nnfNode); err != nil { - return ctrl.Result{}, err - } + if nnfNode.Status.Fenced { + log.WithValues("fenced", nnfNode.Status.Fenced).Info("resource disabled") + nnfNode.Status.Fenced = false - log.Info("fenced status cleared") - return ctrl.Result{}, nil + if err := r.Status().Update(ctx, nnfNode); err != nil { + return ctrl.Result{}, err } - // TODO: Fencing Agent Phase #2: Resume Rabbit NLC pods, wait for the pods to - // resume, then change Node Status to Enabled - - storage.Status.RebootRequired = false - storage.Status.Message = "" + log.Info("fenced status cleared") + return ctrl.Result{}, nil } - if nnfNode.Status.Fenced { - storage.Status.Status = dwsv1alpha2.DegradedStatus - storage.Status.RebootRequired = true - storage.Status.Message = "Storage node requires reboot to recover from STONITH event" - } else { - nodeState, err := r.coreNodeState(ctx, storage) - if err != nil { - return ctrl.Result{}, err - } + // TODO: Fencing Agent Phase #2: Resume Rabbit NLC pods, wait for the pods to + // resume, then change Node Status to Enabled - if !nodeState.nodeReady { - log.Info("storage node is offline") - storage.Status.Status = dwsv1alpha2.OfflineStatus - storage.Status.Message = "Kubernetes node is offline" - } else if len(nodeState.nnfTaint) > 0 { - log.Info(fmt.Sprintf("storage node is tainted with %s", nodeState.nnfTaint)) - storage.Status.Status = dwsv1alpha2.DrainedStatus - storage.Status.Message = fmt.Sprintf("Kubernetes node is tainted with %s", nodeState.nnfTaint) - } else { - storage.Status.Status = nnfNode.Status.Status.ConvertToDWSResourceStatus() - } - } + storage.Status.RebootRequired = false + storage.Status.Message = "" + } - case dwsv1alpha2.DisabledState: - // TODO: Fencing Agent Phase #2: Pause Rabbit NLC pods, wait for pods to be - // removed, then change Node Status to Disabled + if nnfNode.Status.Fenced { + storage.Status.Status = dwsv1alpha2.DegradedStatus + storage.Status.RebootRequired = true + storage.Status.Message = "Storage node requires reboot to recover from STONITH event" + } else { + nodeState, err := r.coreNodeState(ctx, storage) + if err != nil { + return ctrl.Result{}, err + } - storage.Status.Status = dwsv1alpha2.DisabledStatus - storage.Status.Message = "Storage node manually disabled" + if !nodeState.nodeReady { + log.Info("storage node is offline") + storage.Status.Status = dwsv1alpha2.OfflineStatus + storage.Status.Message = "Kubernetes node is offline" + } else if 
len(nodeState.nnfTaint) > 0 { + log.Info(fmt.Sprintf("storage node is tainted with %s", nodeState.nnfTaint)) + storage.Status.Status = dwsv1alpha2.DrainedStatus + storage.Status.Message = fmt.Sprintf("Kubernetes node is tainted with %s", nodeState.nnfTaint) + } else { + storage.Status.Status = nnfNode.Status.Status.ConvertToDWSResourceStatus() + } } return ctrl.Result{}, nil @@ -285,7 +273,7 @@ func (r *DWSStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&dwsv1alpha2.Storage{}). - Watches(&nnfv1alpha1.NnfNode{}, handler.EnqueueRequestsFromMapFunc(nnfNodeMapFunc)). + Watches(&nnfv1alpha2.NnfNode{}, handler.EnqueueRequestsFromMapFunc(nnfNodeMapFunc)). Watches(&corev1.Node{}, handler.EnqueueRequestsFromMapFunc(nodeMapFunc)). Complete(r) } diff --git a/internal/controller/filesystem_helpers.go b/internal/controller/filesystem_helpers.go index 629af9a83..e8801acf9 100644 --- a/internal/controller/filesystem_helpers.go +++ b/internal/controller/filesystem_helpers.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -35,14 +35,14 @@ import ( "github.com/go-logr/logr" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages/finalizers,verbs=update //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=get;create;list;watch;update;patch;delete;deletecollection -func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { +func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { blockDevice, err := newMockBlockDevice(ctx, c, nnfNodeStorage, index, log) if err != nil { @@ -58,7 +58,7 @@ func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nn } // getBlockDeviceAndFileSystem returns blockdevice and filesystem interfaces based on the allocation type and NnfStorageProfile. 
-func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { +func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT") if found || os.Getenv("ENVIRONMENT") == "kind" { return getBlockDeviceAndFileSystemForKind(ctx, c, nnfNodeStorage, index, log) @@ -107,7 +107,7 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt return blockDevice, fileSystem, nil case "lustre": - commandLines := nnfv1alpha1.NnfStorageProfileLustreCmdLines{} + commandLines := nnfv1alpha2.NnfStorageProfileLustreCmdLines{} switch nnfNodeStorage.Spec.LustreStorage.TargetType { case "mgt": @@ -151,7 +151,7 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt return nil, nil, dwsv1alpha2.NewResourceError("unsupported file system type %s", nnfNodeStorage.Spec.FileSystemType).WithMajor() } -func isNodeBlockStorageCurrent(ctx context.Context, c client.Client, nnfNodeBlockStorage *nnfv1alpha1.NnfNodeBlockStorage) (bool, error) { +func isNodeBlockStorageCurrent(ctx context.Context, c client.Client, nnfNodeBlockStorage *nnfv1alpha2.NnfNodeBlockStorage) (bool, error) { if _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT"); found { return true, nil } @@ -188,15 +188,15 @@ func isNodeBlockStorageCurrent(ctx context.Context, c client.Client, nnfNodeBloc return false, nil } -func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileLustreCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { +func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, cmdLines nnfv1alpha2.NnfStorageProfileLustreCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { zpool := blockdevice.Zpool{} // This is for the fake NnfNodeStorage case. 
We don't need to create the zpool BlockDevice - if nnfNodeStorage.Spec.BlockReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfNodeBlockStorage{}).Name() { + if nnfNodeStorage.Spec.BlockReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfNodeBlockStorage{}).Name() { return newMockBlockDevice(ctx, c, nnfNodeStorage, index, log) } - nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{ + nnfNodeBlockStorage := &nnfv1alpha2.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorage.GetName(), Namespace: nnfNodeStorage.GetNamespace(), @@ -237,7 +237,7 @@ func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *n return &zpool, nil } -func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { +func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, cmdLines nnfv1alpha2.NnfStorageProfileCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { lvmDesc := blockdevice.Lvm{} devices := []string{} @@ -246,8 +246,8 @@ func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnf blockIndex = 0 } - if nnfNodeStorage.Spec.BlockReference.Kind == reflect.TypeOf(nnfv1alpha1.NnfNodeBlockStorage{}).Name() { - nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{ + if nnfNodeStorage.Spec.BlockReference.Kind == reflect.TypeOf(nnfv1alpha2.NnfNodeBlockStorage{}).Name() { + nnfNodeBlockStorage := &nnfv1alpha2.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorage.GetName(), Namespace: nnfNodeStorage.GetNamespace(), @@ -330,7 +330,7 @@ func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnf return &lvmDesc, nil } -func newMockBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, error) { +func newMockBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, error) { blockDevice := blockdevice.MockBlockDevice{ Log: log, } @@ -338,7 +338,7 @@ func newMockBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nn return &blockDevice, nil } -func newBindFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newBindFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { fs := filesystem.SimpleFileSystem{} fs.Log = log @@ -352,7 +352,7 @@ func newBindFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnf return &fs, nil } -func newGfs2FileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newGfs2FileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, cmdLines nnfv1alpha2.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { fs := filesystem.SimpleFileSystem{} fs.Log = log @@ -376,7 +376,7 @@ func newGfs2FileSystem(ctx context.Context, c 
client.Client, nnfNodeStorage *nnf return &fs, nil } -func newXfsFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newXfsFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, cmdLines nnfv1alpha2.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { fs := filesystem.SimpleFileSystem{} fs.Log = log @@ -395,7 +395,7 @@ func newXfsFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv return &fs, nil } -func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileLustreCmdLines, mountCommand string, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, cmdLines nnfv1alpha2.NnfStorageProfileLustreCmdLines, mountCommand string, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { fs := filesystem.LustreFileSystem{} targetPath, err := lustreTargetPath(ctx, c, nnfNodeStorage, nnfNodeStorage.Spec.LustreStorage.TargetType, nnfNodeStorage.Spec.LustreStorage.StartIndex+index) @@ -419,7 +419,7 @@ func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *n return &fs, nil } -func newMockFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newMockFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { path := os.Getenv("MOCK_FILE_SYSTEM_PATH") if len(path) == 0 { path = "/mnt/filesystems" @@ -433,7 +433,7 @@ func newMockFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnf return &fs, nil } -func lustreTargetPath(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, targetType string, index int) (string, error) { +func lustreTargetPath(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, targetType string, index int) (string, error) { labels := nnfNodeStorage.GetLabels() // Use the NnfStorage UID since the NnfStorage exists for as long as the storage allocation exists. @@ -446,7 +446,7 @@ func lustreTargetPath(ctx context.Context, c client.Client, nnfNodeStorage *nnfv return fmt.Sprintf("/mnt/nnf/%s-%s-%d", nnfStorageUid, targetType, index), nil } -func zpoolName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, targetType string, index int) (string, error) { +func zpoolName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, targetType string, index int) (string, error) { labels := nnfNodeStorage.GetLabels() // Use the NnfStorage UID since the NnfStorage exists for as long as the storage allocation exists. 
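Aside: the helpers in this hunk all build names the same way, combining the owner UID label (the UID of the parent NnfStorage, which the comments note exists for as long as the storage allocation) with the target type and allocation index, so names stay stable and collision-free across reconciles. A minimal sketch of the convention as the format strings above define it; the UID and inputs below are made-up values for illustration:

```go
package main

import "fmt"

// Illustrative sketch of the naming convention used by lustreTargetPath() and
// zpoolName() above. The UID, target type, and index are hypothetical.
func main() {
	nnfStorageUid := "a1b2c3d4-0000-1111-2222-333344445555" // hypothetical owner UID label value
	targetType := "ost"
	index := 0

	// Mount point for a Lustre target, per lustreTargetPath()
	fmt.Printf("/mnt/nnf/%s-%s-%d\n", nnfStorageUid, targetType, index)

	// Backing zpool name, per zpoolName()
	fmt.Printf("pool-%s-%s-%d\n", nnfStorageUid, targetType, index)
}
```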
@@ -459,7 +459,7 @@ func zpoolName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1 return fmt.Sprintf("pool-%s-%s-%d", nnfStorageUid, targetType, index), nil } -func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (string, error) { +func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, index int) (string, error) { labels := nnfNodeStorage.GetLabels() // Use the NnfStorage UID since the NnfStorage exists for as long as the storage allocation exists. @@ -468,7 +468,7 @@ func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1 if !ok { return "", fmt.Errorf("missing Owner UID label on NnfNodeStorage") } - directiveIndex, ok := labels[nnfv1alpha1.DirectiveIndexLabel] + directiveIndex, ok := labels[nnfv1alpha2.DirectiveIndexLabel] if !ok { return "", fmt.Errorf("missing directive index label on NnfNodeStorage") } @@ -480,7 +480,7 @@ func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1 return fmt.Sprintf("%s_%s_%d", nnfStorageUid, directiveIndex, index), nil } -func logicalVolumeName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (string, error) { +func logicalVolumeName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, index int) (string, error) { if nnfNodeStorage.Spec.SharedAllocation { // For a shared VG, the LV name must be unique in the VG return fmt.Sprintf("lv-%d", index), nil diff --git a/internal/controller/integration_test.go b/internal/controller/integration_test.go index 215bbfdaf..8b9886d4b 100644 --- a/internal/controller/integration_test.go +++ b/internal/controller/integration_test.go @@ -42,8 +42,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" dwparse "github.com/DataWorkflowServices/dws/utils/dwdparse" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) var _ = Describe("Integration Test", func() { @@ -63,9 +62,9 @@ var _ = Describe("Integration Test", func() { persistentInstance *dwsv1alpha2.PersistentStorageInstance nodeNames []string setup sync.Once - storageProfile *nnfv1alpha1.NnfStorageProfile - dmProfile *nnfv1alpha1.NnfDataMovementProfile - dmm *nnfv1alpha1.NnfDataMovementManager + storageProfile *nnfv1alpha2.NnfStorageProfile + dmProfile *nnfv1alpha2.NnfDataMovementProfile + dmm *nnfv1alpha2.NnfDataMovementManager ) advanceState := func(state dwsv1alpha2.WorkflowState, w *dwsv1alpha2.Workflow, testStackOffset int) { @@ -83,12 +82,12 @@ var _ = Describe("Integration Test", func() { }).WithOffset(testStackOffset).Should(Equal(state), fmt.Sprintf("Waiting on state %s", state)) } - verifyNnfNodeStoragesHaveStorageProfileLabel := func(nnfStorage *nnfv1alpha1.NnfStorage) { + verifyNnfNodeStoragesHaveStorageProfileLabel := func(nnfStorage *nnfv1alpha2.NnfStorage) { for allocationSetIndex := range nnfStorage.Spec.AllocationSets { allocationSet := nnfStorage.Spec.AllocationSets[allocationSetIndex] for i, node := range allocationSet.Nodes { // Per Rabbit namespace. 
- nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{ + nnfNodeStorage := &nnfv1alpha2.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorageName(nnfStorage, allocationSetIndex, i), Namespace: node.Name, @@ -139,14 +138,14 @@ var _ = Describe("Integration Test", func() { if findDataMovementDirectiveIndex() >= 0 { - dms := &nnfv1alpha1.NnfDataMovementList{} + dms := &nnfv1alpha2.NnfDataMovementList{} Expect(k8sClient.List(context.TODO(), dms)).To(Succeed()) for _, dm := range dms.Items { dm := dm g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(&dm), &dm)).To(Succeed()) - dm.Status.State = nnfv1alpha1.DataMovementConditionTypeFinished - dm.Status.Status = nnfv1alpha1.DataMovementConditionReasonSuccess + dm.Status.State = nnfv1alpha2.DataMovementConditionTypeFinished + dm.Status.Status = nnfv1alpha2.DataMovementConditionReasonSuccess g.Expect(k8sClient.Status().Update(context.TODO(), &dm)).To(Succeed()) } } @@ -165,7 +164,7 @@ var _ = Describe("Integration Test", func() { } By("Verify that the NnfStorage now owns the pinned profile") commonName, commonNamespace := getStorageReferenceNameFromWorkflowActual(w, dwIndex) - nnfStorage := &nnfv1alpha1.NnfStorage{} + nnfStorage := &nnfv1alpha2.NnfStorage{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: commonName, Namespace: commonNamespace}, nnfStorage)).To(Succeed()) Expect(verifyPinnedProfile(context.TODO(), k8sClient, commonNamespace, commonName)).WithOffset(testStackOffset).To(Succeed()) @@ -258,7 +257,7 @@ var _ = Describe("Integration Test", func() { BlockOwnerDeletion: &blockOwnerDeletion, } - nnfStorage := &nnfv1alpha1.NnfStorage{} + nnfStorage := &nnfv1alpha2.NnfStorage{} if nnfStoragePresent { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(persistentInstance), nnfStorage)).To(Succeed(), "Fetch NnfStorage matching PersistentStorageInstance") Expect(nnfStorage.ObjectMeta.OwnerReferences).To(ContainElement(persistentStorageOwnerRef), "NnfStorage owned by PersistentStorageInstance") @@ -346,7 +345,7 @@ var _ = Describe("Integration Test", func() { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - v1alpha1.RabbitNodeSelectorLabel: "true", + nnfv1alpha2.RabbitNodeSelectorLabel: "true", }, }, Status: corev1.NodeStatus{ @@ -362,21 +361,21 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Create(context.TODO(), node)).To(Succeed()) // Create the NNF Node resource - nnfNode := &nnfv1alpha1.NnfNode{ + nnfNode := &nnfv1alpha2.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: nodeName, }, - Spec: nnfv1alpha1.NnfNodeSpec{ + Spec: nnfv1alpha2.NnfNodeSpec{ Name: nodeName, - State: nnfv1alpha1.ResourceEnable, + State: nnfv1alpha2.ResourceEnable, }, - Status: nnfv1alpha1.NnfNodeStatus{}, + Status: nnfv1alpha2.NnfNodeStatus{}, } Expect(k8sClient.Create(context.TODO(), nnfNode)).To(Succeed()) - // Create the DWS Storage resource + // Check that the DWS storage resource was updated with the compute node information storage := &dwsv1alpha2.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, @@ -384,10 +383,6 @@ var _ = Describe("Integration Test", func() { }, } - Expect(k8sClient.Create(context.TODO(), storage)).To(Succeed()) - - // Check that the DWS storage resource was updated with the compute node information - Eventually(func() error { return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), storage) }).Should(Succeed()) @@ -427,38 +422,26 @@ var _ = Describe("Integration Test", func() { workflow = nil 
Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha1.NnfStorageProfile{} + profExpected := &nnfv1alpha2.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) Expect(k8sClient.Delete(context.TODO(), dmProfile)).To(Succeed()) - dmProfExpected := &nnfv1alpha1.NnfDataMovementProfile{} + dmProfExpected := &nnfv1alpha2.NnfDataMovementProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dmProfile), dmProfExpected) }).ShouldNot(Succeed()) for _, nodeName := range nodeNames { - storage := &dwsv1alpha2.Storage{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - Namespace: corev1.NamespaceDefault, - }, - } - Expect(k8sClient.Delete(context.TODO(), storage)).To(Succeed()) - tempStorage := &dwsv1alpha2.Storage{} - Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present - return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), tempStorage) - }).ShouldNot(Succeed()) - - nnfNode := &nnfv1alpha1.NnfNode{ + nnfNode := &nnfv1alpha2.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: nodeName, }, } Expect(k8sClient.Delete(context.TODO(), nnfNode)).To(Succeed()) - tempNnfNode := &nnfv1alpha1.NnfNode{} + tempNnfNode := &nnfv1alpha2.NnfNode{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNode), tempNnfNode) }).ShouldNot(Succeed()) @@ -758,7 +741,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbdRef.Name, Namespace: dbdRef.Namespace}, dbd)).To(Succeed()) By("Check for an NNF Access describing the computes") - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", dbd.Name, "computes"), Namespace: workflow.Namespace, @@ -800,9 +783,9 @@ var _ = Describe("Integration Test", func() { Expect(access.Spec.StorageReference).To(MatchFields(IgnoreExtras, Fields{ "Name": Equal(storageName), "Namespace": Equal(workflow.Namespace), // Namespace is the same as the workflow - "Kind": Equal(reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name()), })) - storage := &nnfv1alpha1.NnfStorage{ + storage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: access.Spec.StorageReference.Name, Namespace: access.Spec.StorageReference.Namespace, @@ -828,7 +811,7 @@ var _ = Describe("Integration Test", func() { // For shared file systems, there should also be a NNF Access for the Rabbit as well as corresponding Client Mounts per Rabbit if fsType == "gfs2" { By("Checking for an NNF Access describing the servers") - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", dbd.Name, "servers"), Namespace: workflow.Namespace, @@ -858,7 +841,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbdRef.Name, Namespace: dbdRef.Namespace}, dbd)).To(Succeed()) By("Check that NNF Access describing computes is 
not present") - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", dbd.Name, "computes"), Namespace: workflow.Namespace, @@ -874,7 +857,7 @@ var _ = Describe("Integration Test", func() { if fsType == "gfs2" { By("Check that NNF Access describing computes is not present") - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", dbd.Name, "servers"), Namespace: workflow.Namespace, @@ -918,7 +901,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace}, servers)).To(Succeed()) By("NNFStorages for persistentStorageInstance should NOT be deleted") - nnfStorage := &nnfv1alpha1.NnfStorage{} + nnfStorage := &nnfv1alpha2.NnfStorage{} Consistently(func() error { return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), nnfStorage) }).Should(Succeed(), "NnfStorage should continue to exist") @@ -933,7 +916,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace}, servers)).To(Succeed()) By("NNFStorages associated with jobdw should be deleted") - nnfStorage := &nnfv1alpha1.NnfStorage{} + nnfStorage := &nnfv1alpha2.NnfStorage{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), nnfStorage) }).ShouldNot(Succeed(), "NnfStorage should be deleted") @@ -1026,7 +1009,7 @@ var _ = Describe("Integration Test", func() { BeforeEach(func() { ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha1.DataMovementNamespace, + Name: nnfv1alpha2.DataMovementNamespace, }, } @@ -1045,12 +1028,12 @@ var _ = Describe("Integration Test", func() { }, } - dmm = &nnfv1alpha1.NnfDataMovementManager{ + dmm = &nnfv1alpha2.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha1.DataMovementManagerName, - Namespace: nnfv1alpha1.DataMovementNamespace, + Name: nnfv1alpha2.DataMovementManagerName, + Namespace: nnfv1alpha2.DataMovementNamespace, }, - Spec: nnfv1alpha1.NnfDataMovementManagerSpec{ + Spec: nnfv1alpha2.NnfDataMovementManagerSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -1060,7 +1043,7 @@ var _ = Describe("Integration Test", func() { }, }, }, - Status: nnfv1alpha1.NnfDataMovementManagerStatus{ + Status: nnfv1alpha2.NnfDataMovementManagerStatus{ Ready: true, }, } @@ -1168,7 +1151,7 @@ var _ = Describe("Integration Test", func() { validateNnfAccessHasCorrectTeardownState := func(state dwsv1alpha2.WorkflowState) { Expect(workflow.Status.DirectiveBreakdowns).To(HaveLen(1)) - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d-%s", workflow.Name, 0, "servers"), Namespace: workflow.Namespace, @@ -1191,7 +1174,7 @@ var _ = Describe("Integration Test", func() { validateNnfAccessIsNotFound := func() { Expect(workflow.Status.DirectiveBreakdowns).To(HaveLen(1)) - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d-%s", workflow.Name, 0, "servers"), Namespace: workflow.Namespace, @@ -1295,15 +1278,15 @@ var _ = 
Describe("Integration Test", func() { By("Injecting an error in the data movement resource") - dm := &nnfv1alpha1.NnfDataMovement{ + dm := &nnfv1alpha2.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: "failed-data-movement", - Namespace: nnfv1alpha1.DataMovementNamespace, + Namespace: nnfv1alpha2.DataMovementNamespace, }, } dwsv1alpha2.AddWorkflowLabels(dm, workflow) dwsv1alpha2.AddOwnerLabels(dm, workflow) - nnfv1alpha1.AddDataMovementTeardownStateLabel(dm, dwsv1alpha2.StatePostRun) + nnfv1alpha2.AddDataMovementTeardownStateLabel(dm, dwsv1alpha2.StatePostRun) Expect(k8sClient.Create(context.TODO(), dm)).To(Succeed()) @@ -1311,8 +1294,8 @@ var _ = Describe("Integration Test", func() { return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) }).Should(Succeed()) - dm.Status.State = nnfv1alpha1.DataMovementConditionTypeFinished - dm.Status.Status = nnfv1alpha1.DataMovementConditionReasonFailed + dm.Status.State = nnfv1alpha2.DataMovementConditionTypeFinished + dm.Status.Status = nnfv1alpha2.DataMovementConditionReasonFailed Expect(k8sClient.Status().Update(context.TODO(), dm)).To(Succeed()) @@ -1341,7 +1324,7 @@ var _ = Describe("Integration Test", func() { Describe("Test with container directives", func() { var ( - containerProfile *nnfv1alpha1.NnfContainerProfile + containerProfile *nnfv1alpha2.NnfContainerProfile ) BeforeEach(func() { @@ -1412,7 +1395,7 @@ var _ = Describe("Integration Test", func() { By("verifying the number of targeted NNF nodes for the container jobs") matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) - matchLabels[nnfv1alpha1.DirectiveIndexLabel] = "0" + matchLabels[nnfv1alpha2.DirectiveIndexLabel] = "0" jobList := &batchv1.JobList{} Eventually(func() int { @@ -1436,9 +1419,9 @@ var _ = Describe("Integration Test", func() { var ( intendedDirective string - profileExternalMGS *nnfv1alpha1.NnfStorageProfile - profileCombinedMGTMDT *nnfv1alpha1.NnfStorageProfile - nnfLustreMgt *nnfv1alpha1.NnfLustreMGT + profileExternalMGS *nnfv1alpha2.NnfStorageProfile + profileCombinedMGTMDT *nnfv1alpha2.NnfStorageProfile + nnfLustreMgt *nnfv1alpha2.NnfLustreMGT profileMgsNid string @@ -1469,13 +1452,13 @@ var _ = Describe("Integration Test", func() { Expect(createNnfStorageProfile(profileExternalMGS, true)).ToNot(BeNil()) Expect(createNnfStorageProfile(profileCombinedMGTMDT, true)).ToNot(BeNil()) - nnfLustreMgt = &nnfv1alpha1.NnfLustreMGT{ + nnfLustreMgt = &nnfv1alpha2.NnfLustreMGT{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "profile-mgs", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha1.NnfLustreMGTSpec{ + Spec: nnfv1alpha2.NnfLustreMGTSpec{ Addresses: []string{profileMgsNid}, FsNameStart: "dddddddd", }, @@ -1582,7 +1565,7 @@ var _ = Describe("Integration Test", func() { By(fmt.Sprintf("Verify that the MGS NID %s is used by the filesystem", getNidVia)) advanceStateAndCheckReady(dwsv1alpha2.StateSetup, workflow) // The NnfStorage's name matches the Server resource's name. 
- nnfstorage := &nnfv1alpha1.NnfStorage{} + nnfstorage := &nnfv1alpha2.NnfStorage{} Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dbdServer), nnfstorage)).To(Succeed()) for _, comp := range nnfstorage.Spec.AllocationSets { Expect(comp.MgsAddress).To(Equal(desiredNid)) diff --git a/internal/controller/nnf_access_controller.go b/internal/controller/nnf_access_controller.go index 38f10e33d..eee2b8cc7 100644 --- a/internal/controller/nnf_access_controller.go +++ b/internal/controller/nnf_access_controller.go @@ -43,10 +43,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -82,7 +83,7 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( metrics.NnfAccessReconcilesTotal.Inc() - access := &nnfv1alpha1.NnfAccess{} + access := &nnfv1alpha2.NnfAccess{} if err := r.Get(ctx, req.NamespacedName, access); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -90,7 +91,7 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfAccessStatus](access) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha2.NnfAccessStatus](access) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { access.Status.SetResourceErrorAndLog(err, log) }() @@ -199,7 +200,7 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } -func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha1.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { +func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha2.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { // Lock the NnfStorage by adding an annotation with the name/namespace for this // NnfAccess. This is used for non-clustered file systems that can only be mounted // from a single host. @@ -212,7 +213,7 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha1.Nnf return &ctrl.Result{RequeueAfter: time.Second}, nil } - // Add compute node information to the storage map, if necessary. 
+ // Request that the devices be made available on the correct nodes err = r.addBlockStorageAccess(ctx, access, storageMapping) if err != nil { if apierrors.IsConflict(err) { @@ -222,6 +223,16 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha1.Nnf return nil, dwsv1alpha2.NewResourceError("unable to add endpoints to NnfNodeStorage").WithError(err) } + // Wait for all the devices to be made available on the correct nodes + ready, err := r.getBlockStorageAccessStatus(ctx, access, storageMapping) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("unable to check endpoints for NnfNodeStorage").WithError(err) + } + + if !ready { + return &ctrl.Result{RequeueAfter: time.Second * 2}, nil + } + // Create the ClientMount resources. One ClientMount resource is created per client err = r.manageClientMounts(ctx, access, storageMapping) if err != nil { @@ -232,15 +243,6 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha1.Nnf return nil, dwsv1alpha2.NewResourceError("unable to create ClientMount resources").WithError(err) } - ready, err := r.getBlockStorageAccessStatus(ctx, access, storageMapping) - if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to check endpoints for NnfNodeStorage").WithError(err) - } - - if ready == false { - return &ctrl.Result{}, nil - } - // Aggregate the status from all the ClientMount resources ready, err = r.getClientMountStatus(ctx, access, clientList) if err != nil { @@ -255,7 +257,7 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha1.Nnf return nil, nil } -func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha1.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { +func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha2.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { // Update client mounts to trigger unmount operation err := r.manageClientMounts(ctx, access, storageMapping) if err != nil { @@ -289,9 +291,9 @@ func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha1.N // lockStorage applies an annotation to the NnfStorage resource with the name and namespace of the NnfAccess resource. // This acts as a lock to prevent multiple NnfAccess resources from mounting the same file system.
This is only necessary // for non-clustered file systems -func (r *NnfAccessReconciler) lockStorage(ctx context.Context, access *nnfv1alpha1.NnfAccess) (bool, error) { +func (r *NnfAccessReconciler) lockStorage(ctx context.Context, access *nnfv1alpha2.NnfAccess) (bool, error) { - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name() { return false, fmt.Errorf("invalid StorageReference kind %s", access.Spec.StorageReference.Kind) } @@ -300,7 +302,7 @@ func (r *NnfAccessReconciler) lockStorage(ctx context.Context, access *nnfv1alph Namespace: access.Spec.StorageReference.Namespace, } - nnfStorage := &nnfv1alpha1.NnfStorage{} + nnfStorage := &nnfv1alpha2.NnfStorage{} if err := r.Get(ctx, namespacedName, nnfStorage); err != nil { return false, err } @@ -343,10 +345,10 @@ func (r *NnfAccessReconciler) lockStorage(ctx context.Context, access *nnfv1alph } // unlockStorage removes the NnfAccess annotation from an NnfStorage resource if it was added from lockStorage() -func (r *NnfAccessReconciler) unlockStorage(ctx context.Context, access *nnfv1alpha1.NnfAccess) error { - nnfStorage := &nnfv1alpha1.NnfStorage{} +func (r *NnfAccessReconciler) unlockStorage(ctx context.Context, access *nnfv1alpha2.NnfAccess) error { + nnfStorage := &nnfv1alpha2.NnfStorage{} - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name() { return nil } @@ -393,7 +395,7 @@ func (r *NnfAccessReconciler) unlockStorage(ctx context.Context, access *nnfv1al } // getClientList returns the list of client node names from either the Computes resource of the NnfStorage resource -func (r *NnfAccessReconciler) getClientList(ctx context.Context, access *nnfv1alpha1.NnfAccess) ([]string, error) { +func (r *NnfAccessReconciler) getClientList(ctx context.Context, access *nnfv1alpha2.NnfAccess) ([]string, error) { if access.Spec.ClientReference != (corev1.ObjectReference{}) { return r.getClientListFromClientReference(ctx, access) } @@ -402,7 +404,7 @@ func (r *NnfAccessReconciler) getClientList(ctx context.Context, access *nnfv1al } // getClientListFromClientReference returns a list of client nodes names from the Computes resource -func (r *NnfAccessReconciler) getClientListFromClientReference(ctx context.Context, access *nnfv1alpha1.NnfAccess) ([]string, error) { +func (r *NnfAccessReconciler) getClientListFromClientReference(ctx context.Context, access *nnfv1alpha2.NnfAccess) ([]string, error) { computes := &dwsv1alpha2.Computes{} if access.Spec.ClientReference.Kind != reflect.TypeOf(dwsv1alpha2.Computes{}).Name() { @@ -428,9 +430,9 @@ func (r *NnfAccessReconciler) getClientListFromClientReference(ctx context.Conte // getClientListFromStorageReference returns a list of client node names from the NnfStorage resource. 
This is the list of Rabbit // nodes that host the storage -func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Context, access *nnfv1alpha1.NnfAccess) ([]string, error) { +func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Context, access *nnfv1alpha2.NnfAccess) ([]string, error) { - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name() { return nil, fmt.Errorf("Invalid StorageReference kind %s", access.Spec.StorageReference.Kind) } @@ -439,7 +441,7 @@ func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Cont Namespace: access.Spec.StorageReference.Namespace, } - nnfStorage := &nnfv1alpha1.NnfStorage{} + nnfStorage := &nnfv1alpha2.NnfStorage{} if err := r.Get(ctx, namespacedName, nnfStorage); err != nil { return nil, err } @@ -461,10 +463,10 @@ func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Cont } // mapClientStorage returns a map of the clients with a list of mounts to make. This picks a device for each client -func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv1alpha1.NnfAccess, clients []string) (map[string][]dwsv1alpha2.ClientMountInfo, error) { - nnfStorage := &nnfv1alpha1.NnfStorage{} +func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv1alpha2.NnfAccess, clients []string) (map[string][]dwsv1alpha2.ClientMountInfo, error) { + nnfStorage := &nnfv1alpha2.NnfStorage{} - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name() { return nil, fmt.Errorf("Invalid StorageReference kind %s", access.Spec.StorageReference.Kind) } @@ -502,7 +504,7 @@ func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv // mapClientNetworkStorage provides the Lustre MGS address information for the clients. All clients get the same // mount information -func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, access *nnfv1alpha1.NnfAccess, clients []string, nnfStorage *nnfv1alpha1.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { +func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, access *nnfv1alpha2.NnfAccess, clients []string, nnfStorage *nnfv1alpha2.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { storageMapping := make(map[string][]dwsv1alpha2.ClientMountInfo) for _, client := range clients { @@ -533,7 +535,7 @@ func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, acces // mapClientLocalStorage picks storage device(s) for each client to access based on locality information // from the (DWS) Storage resources. 
-func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access *nnfv1alpha1.NnfAccess, clients []string, nnfStorage *nnfv1alpha1.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { +func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access *nnfv1alpha2.NnfAccess, clients []string, nnfStorage *nnfv1alpha2.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { allocationSetSpec := nnfStorage.Spec.AllocationSets[setIndex] // Use information from the NnfStorage resource to determine how many allocations @@ -565,14 +567,14 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access // allocation. for nodeName, storageCount := range storageCountMap { matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) - matchLabels[nnfv1alpha1.AllocationSetLabel] = allocationSetSpec.Name + matchLabels[nnfv1alpha2.AllocationSetLabel] = allocationSetSpec.Name listOptions := []client.ListOption{ matchLabels, client.InNamespace(nodeName), } - nnfNodeStorageList := &nnfv1alpha1.NnfNodeStorageList{} + nnfNodeStorageList := &nnfv1alpha2.NnfNodeStorageList{} if err := r.List(ctx, nnfNodeStorageList, listOptions...); err != nil { return nil, err } @@ -595,7 +597,7 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access // so clientmountd will not look at the DeviceReference struct. The DeviceReference information is used by // the data movement code to match up mounts between the Rabbit and compute node. mountInfo.Device.DeviceReference = &dwsv1alpha2.ClientMountDeviceReference{} - mountInfo.Device.DeviceReference.ObjectReference.Kind = reflect.TypeOf(nnfv1alpha1.NnfNodeStorage{}).Name() + mountInfo.Device.DeviceReference.ObjectReference.Kind = reflect.TypeOf(nnfv1alpha2.NnfNodeStorage{}).Name() mountInfo.Device.DeviceReference.ObjectReference.Name = nnfNodeStorage.Name mountInfo.Device.DeviceReference.ObjectReference.Namespace = nnfNodeStorage.Namespace mountInfo.Device.DeviceReference.Data = i @@ -709,7 +711,7 @@ type mountReference struct { // addNodeStorageEndpoints adds the compute node information to the NnfNodeStorage resource // so it can make the NVMe namespaces accessible on the compute node. This is done on the rabbit // by creating StorageGroup resources through swordfish for the correct endpoint. -func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access *nnfv1alpha2.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { // NnfNodeStorage clientReferences only need to be added for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -726,7 +728,7 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access continue } - if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfNodeStorage{}).Name() { + if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfNodeStorage{}).Name() { continue } @@ -741,15 +743,26 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access // Loop through the NnfNodeBlockStorages and add client access information for each of the // computes that need access to an allocation. 
- for nodeBlockStorageReference, mountRefList := range nodeStorageMap { - namespacedName := types.NamespacedName{ - Name: nodeBlockStorageReference.Name, - Namespace: nodeBlockStorageReference.Namespace, + for nodeStorageReference, mountRefList := range nodeStorageMap { + nnfNodeStorage := &nnfv1alpha2.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeStorageReference.Name, + Namespace: nodeStorageReference.Namespace, + }, } - nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{} - err := r.Get(ctx, namespacedName, nnfNodeBlockStorage) - if err != nil { + if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNodeStorage), nnfNodeStorage); err != nil { + return err + } + + nnfNodeBlockStorage := &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nnfNodeStorage.Spec.BlockReference.Name, + Namespace: nnfNodeStorage.Spec.BlockReference.Namespace, + }, + } + + if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNodeBlockStorage), nnfNodeBlockStorage); err != nil { return err } @@ -780,7 +793,7 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access continue } - if err = r.Update(ctx, nnfNodeBlockStorage); err != nil { + if err := r.Update(ctx, nnfNodeBlockStorage); err != nil { return err } } @@ -788,48 +801,86 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access return nil } -func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (bool, error) { +func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, access *nnfv1alpha2.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (bool, error) { // NnfNodeStorage clientReferences only need to be checked for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. if access.Spec.ClientReference == (corev1.ObjectReference{}) { return true, nil } - nodeStorageMap := make(map[corev1.ObjectReference]bool) + nodeStorageMap := make(map[corev1.ObjectReference][]mountReference) // Make a map of NnfNodeStorage references that were mounted by this // nnfAccess - for _, storageList := range storageMapping { + for client, storageList := range storageMapping { for _, mount := range storageList { if mount.Device.DeviceReference == nil { continue } - if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfNodeStorage{}).Name() { + if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfNodeStorage{}).Name() { continue } - nodeStorageMap[mount.Device.DeviceReference.ObjectReference] = true + mountRef := mountReference{ + client: client, + allocationIndex: mount.Device.DeviceReference.Data, + } + + nodeStorageMap[mount.Device.DeviceReference.ObjectReference] = append(nodeStorageMap[mount.Device.DeviceReference.ObjectReference], mountRef) + } } - // Update each of the NnfNodeStorage resources to remove the clientEndpoints that - // were added earlier. Leave the first endpoint since that corresponds to the - // rabbit node. 
+ nnfNodeBlockStorages := []nnfv1alpha2.NnfNodeBlockStorage{} + for nodeStorageReference := range nodeStorageMap { - namespacedName := types.NamespacedName{ - Name: nodeStorageReference.Name, - Namespace: nodeStorageReference.Namespace, + nnfNodeStorage := &nnfv1alpha2.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeStorageReference.Name, + Namespace: nodeStorageReference.Namespace, + }, } - nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{} - err := r.Get(ctx, namespacedName, nnfNodeStorage) - if err != nil { + if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNodeStorage), nnfNodeStorage); err != nil { return false, err } - if nnfNodeStorage.Status.Error != nil { - return false, dwsv1alpha2.NewResourceError("Node: %s", nnfNodeStorage.GetNamespace()).WithError(nnfNodeStorage.Status.Error) + nnfNodeBlockStorage := &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nnfNodeStorage.Spec.BlockReference.Name, + Namespace: nnfNodeStorage.Spec.BlockReference.Namespace, + }, + } + + if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNodeBlockStorage), nnfNodeBlockStorage); err != nil { + return false, err + } + + nnfNodeBlockStorages = append(nnfNodeBlockStorages, *nnfNodeBlockStorage) + } + + for _, nnfNodeBlockStorage := range nnfNodeBlockStorages { + if nnfNodeBlockStorage.Status.Error != nil { + return false, dwsv1alpha2.NewResourceError("Node: %s", nnfNodeBlockStorage.GetNamespace()).WithError(nnfNodeBlockStorage.Status.Error) + } + } + + for _, nnfNodeBlockStorage := range nnfNodeBlockStorages { + for allocationIndex, allocation := range nnfNodeBlockStorage.Spec.Allocations { + for _, nodeName := range allocation.Access { + blockAccess, exists := nnfNodeBlockStorage.Status.Allocations[allocationIndex].Accesses[nodeName] + + // if the map entry doesn't exist in the status section for this node yet, then keep waiting + if !exists { + return false, nil + } + + // Check that the storage group has been created + if blockAccess.StorageGroupId == "" { + return false, nil + } + } } } @@ -839,7 +890,7 @@ func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, a // removeNodeStorageEndpoints modifies the NnfNodeStorage resources to remove the client endpoints for the // compute nodes that had mounted the storage. This causes NnfNodeStorage to remove the StorageGroups for // those compute nodes and remove access to the NVMe namespaces from the computes. -func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, access *nnfv1alpha2.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { // NnfNodeStorage clientReferences only need to be removed for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. 
if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -856,7 +907,7 @@ func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, acce continue } - if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfNodeStorage{}).Name() { + if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfNodeStorage{}).Name() { continue } @@ -873,7 +924,7 @@ func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, acce Namespace: nodeBlockStorageReference.Namespace, } - nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{} + nnfNodeBlockStorage := &nnfv1alpha2.NnfNodeBlockStorage{} err := r.Get(ctx, namespacedName, nnfNodeBlockStorage) if err != nil { if apierrors.IsNotFound(err) { @@ -901,18 +952,18 @@ func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, acce } // manageClientMounts creates or updates the ClientMount resources based on the information in the storageMapping map. -func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nnfv1alpha2.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { log := r.Log.WithValues("NnfAccess", client.ObjectKeyFromObject(access)) if !access.Spec.MakeClientMounts { return nil } - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name() { return dwsv1alpha2.NewResourceError("invalid StorageReference kind %s", access.Spec.StorageReference.Kind).WithFatal() } - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: access.Spec.StorageReference.Name, Namespace: access.Spec.StorageReference.Namespace, @@ -981,7 +1032,7 @@ func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nn } // getClientMountStatus aggregates the status from all the ClientMount resources -func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access *nnfv1alpha1.NnfAccess, clientList []string) (bool, error) { +func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access *nnfv1alpha2.NnfAccess, clientList []string) (bool, error) { if !access.Spec.MakeClientMounts { return true, nil } @@ -1034,7 +1085,7 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * return true, nil } -func clientMountName(access *nnfv1alpha1.NnfAccess) string { +func clientMountName(access *nnfv1alpha2.NnfAccess) string { return access.Namespace + "-" + access.Name } @@ -1060,6 +1111,67 @@ func getIndexMountDir(namespace string, index int) string { return fmt.Sprintf("%s-%s", namespace, strconv.Itoa(index)) } +// ComputesEnqueueRequests triggers on a Computes resource. It finds any NnfAccess resources with the +// same owner as the Computes resource and adds them to the Request list. 
+func (r *NnfAccessReconciler) ComputesEnqueueRequests(ctx context.Context, o client.Object) []reconcile.Request { + log := r.Log.WithValues("Computes", "Enqueue") + + requests := []reconcile.Request{} + + // Get the Computes resource so its owner labels can be inspected + computes := &dwsv1alpha2.Computes{ + ObjectMeta: metav1.ObjectMeta{ + Name: o.GetName(), + Namespace: o.GetNamespace(), + }, + } + + if err := r.Get(ctx, client.ObjectKeyFromObject(computes), computes); err != nil { + return requests + } + + labels := computes.GetLabels() + if labels == nil { + return []reconcile.Request{} + } + + ownerName, exists := labels[dwsv1alpha2.OwnerNameLabel] + if !exists { + return []reconcile.Request{} + } + + ownerNamespace, exists := labels[dwsv1alpha2.OwnerNamespaceLabel] + if !exists { + return []reconcile.Request{} + } + + ownerKind, exists := labels[dwsv1alpha2.OwnerKindLabel] + if !exists { + return []reconcile.Request{} + } + + // Find all the NnfAccess resources with the same owner as the Computes resource + listOptions := []client.ListOption{ + client.MatchingLabels(map[string]string{ + dwsv1alpha2.OwnerKindLabel: ownerKind, + dwsv1alpha2.OwnerNameLabel: ownerName, + dwsv1alpha2.OwnerNamespaceLabel: ownerNamespace, + }), + } + + nnfAccessList := &nnfv1alpha2.NnfAccessList{} + if err := r.List(ctx, nnfAccessList, listOptions...); err != nil { + log.Info("Could not list NnfAccesses", "error", err) + return requests + } + + for _, nnfAccess := range nnfAccessList.Items { + requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: nnfAccess.GetName(), Namespace: nnfAccess.GetNamespace()}}) + } + + return requests +} + // SetupWithManager sets up the controller with the Manager. func (r *NnfAccessReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ @@ -1085,7 +1197,8 @@ func (r *NnfAccessReconciler) SetupWithManager(mgr ctrl.Manager) error { maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&nnfv1alpha1.NnfAccess{}). + For(&nnfv1alpha2.NnfAccess{}). + Watches(&dwsv1alpha2.Computes{}, handler.EnqueueRequestsFromMapFunc(r.ComputesEnqueueRequests)). Watches(&dwsv1alpha2.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). 
Complete(r) } diff --git a/internal/controller/nnf_access_controller_test.go b/internal/controller/nnf_access_controller_test.go index 3f4d3c35f..a41d99765 100644 --- a/internal/controller/nnf_access_controller_test.go +++ b/internal/controller/nnf_access_controller_test.go @@ -34,8 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) var _ = Describe("Access Controller Test", func() { @@ -44,12 +43,12 @@ var _ = Describe("Access Controller Test", func() { "rabbit-nnf-access-test-node-1", "rabbit-nnf-access-test-node-2"} - nnfNodes := [2]*nnfv1alpha1.NnfNode{} + nnfNodes := [2]*nnfv1alpha2.NnfNode{} storages := [2]*dwsv1alpha2.Storage{} nodes := [2]*corev1.Node{} var systemConfiguration *dwsv1alpha2.SystemConfiguration - var storageProfile *nnfv1alpha1.NnfStorageProfile + var storageProfile *nnfv1alpha2.NnfStorageProfile var setup sync.Once BeforeEach(func() { @@ -66,7 +65,7 @@ var _ = Describe("Access Controller Test", func() { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - v1alpha1.RabbitNodeSelectorLabel: "true", + nnfv1alpha2.RabbitNodeSelectorLabel: "true", }, }, Status: corev1.NodeStatus{ @@ -81,14 +80,14 @@ var _ = Describe("Access Controller Test", func() { Expect(k8sClient.Create(context.TODO(), nodes[i])).To(Succeed()) - nnfNodes[i] = &nnfv1alpha1.NnfNode{ + nnfNodes[i] = &nnfv1alpha2.NnfNode{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: nodeName, }, - Spec: nnfv1alpha1.NnfNodeSpec{ - State: nnfv1alpha1.ResourceEnable, + Spec: nnfv1alpha2.NnfNodeSpec{ + State: nnfv1alpha2.ResourceEnable, }, } Expect(k8sClient.Create(context.TODO(), nnfNodes[i])).To(Succeed()) @@ -137,7 +136,7 @@ var _ = Describe("Access Controller Test", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha1.NnfStorageProfile{} + profExpected := &nnfv1alpha2.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) @@ -150,7 +149,7 @@ var _ = Describe("Access Controller Test", func() { }).ShouldNot(Succeed()) Expect(k8sClient.Delete(context.TODO(), nnfNodes[i])).To(Succeed()) - tempNnfNode := &nnfv1alpha1.NnfNode{} + tempNnfNode := &nnfv1alpha2.NnfNode{} Eventually(func() error { // Delete can still return the cached object. 
 				// Wait until the object is no longer present.
 				return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNodes[i]), tempNnfNode)
 			}).ShouldNot(Succeed())
@@ -172,29 +171,29 @@ var _ = Describe("Access Controller Test", func() {
 	Describe("Create Client Mounts", func() {
 
 		It("Creates Lustre Client Mount", func() {
 
-			allocationNodes := make([]nnfv1alpha1.NnfStorageAllocationNodes, len(nodeNames))
+			allocationNodes := make([]nnfv1alpha2.NnfStorageAllocationNodes, len(nodeNames))
 			for idx, nodeName := range nodeNames {
-				allocationNodes[idx] = nnfv1alpha1.NnfStorageAllocationNodes{
+				allocationNodes[idx] = nnfv1alpha2.NnfStorageAllocationNodes{
 					Count: 1,
 					Name:  nodeName,
 				}
 			}
 
-			storage := &nnfv1alpha1.NnfStorage{
+			storage := &nnfv1alpha2.NnfStorage{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "nnf-access-test-storage-lustre",
 					Namespace: corev1.NamespaceDefault,
 				},
-				Spec: nnfv1alpha1.NnfStorageSpec{
+				Spec: nnfv1alpha2.NnfStorageSpec{
 					FileSystemType: "lustre",
-					AllocationSets: []nnfv1alpha1.NnfStorageAllocationSetSpec{
+					AllocationSets: []nnfv1alpha2.NnfStorageAllocationSetSpec{
 						{
 							Name:     "mgtmdt",
 							Capacity: 50000000000,
-							NnfStorageLustreSpec: nnfv1alpha1.NnfStorageLustreSpec{
+							NnfStorageLustreSpec: nnfv1alpha2.NnfStorageLustreSpec{
 								TargetType: "mgtmdt",
 							},
-							Nodes: []nnfv1alpha1.NnfStorageAllocationNodes{
+							Nodes: []nnfv1alpha2.NnfStorageAllocationNodes{
 								{
 									Count: 1,
 									Name:  nodeNames[0],
@@ -204,7 +203,7 @@ var _ = Describe("Access Controller Test", func() {
 						{
 							Name:     "ost",
 							Capacity: 50000000000,
-							NnfStorageLustreSpec: nnfv1alpha1.NnfStorageLustreSpec{
+							NnfStorageLustreSpec: nnfv1alpha2.NnfStorageLustreSpec{
 								TargetType: "ost",
 							},
 							Nodes: allocationNodes,
@@ -218,18 +217,18 @@ var _ = Describe("Access Controller Test", func() {
 
 		It("Creates XFS Client Mount", func() {
 
-			storage := &nnfv1alpha1.NnfStorage{
+			storage := &nnfv1alpha2.NnfStorage{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "nnf-access-test-storage-xfs",
 					Namespace: corev1.NamespaceDefault,
 				},
-				Spec: nnfv1alpha1.NnfStorageSpec{
+				Spec: nnfv1alpha2.NnfStorageSpec{
 					FileSystemType: "xfs",
-					AllocationSets: []nnfv1alpha1.NnfStorageAllocationSetSpec{
+					AllocationSets: []nnfv1alpha2.NnfStorageAllocationSetSpec{
 						{
 							Name:     "xfs",
 							Capacity: 50000000000,
-							Nodes: []nnfv1alpha1.NnfStorageAllocationNodes{
+							Nodes: []nnfv1alpha2.NnfStorageAllocationNodes{
 								{
 									Count: 1,
 									Name:  nodeNames[0],
@@ -249,18 +248,18 @@ var _ = Describe("Access Controller Test", func() {
 
 		It("Creates GFS2 Client Mount", func() {
 
-			storage := &nnfv1alpha1.NnfStorage{
+			storage := &nnfv1alpha2.NnfStorage{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "nnf-access-test-storage-gfs2",
 					Namespace: corev1.NamespaceDefault,
 				},
-				Spec: nnfv1alpha1.NnfStorageSpec{
+				Spec: nnfv1alpha2.NnfStorageSpec{
 					FileSystemType: "gfs2",
-					AllocationSets: []nnfv1alpha1.NnfStorageAllocationSetSpec{
+					AllocationSets: []nnfv1alpha2.NnfStorageAllocationSetSpec{
 						{
 							Name:     "gfs2",
 							Capacity: 50000000000,
-							Nodes: []nnfv1alpha1.NnfStorageAllocationNodes{
+							Nodes: []nnfv1alpha2.NnfStorageAllocationNodes{
 								{
 									Count: 1,
 									Name:  nodeNames[0],
@@ -280,7 +279,7 @@ var _ = Describe("Access Controller Test", func() {
 	})
 })
 
-func verifyClientMount(storage *nnfv1alpha1.NnfStorage, storageProfile *nnfv1alpha1.NnfStorageProfile, nodeNames []string) {
+func verifyClientMount(storage *nnfv1alpha2.NnfStorage, storageProfile *nnfv1alpha2.NnfStorageProfile, nodeNames []string) {
 	Expect(k8sClient.Create(context.TODO(), storage)).To(Succeed(), "Create NNF Storage")
 
 	Eventually(func(g Gomega) error {
 		g.Expect(k8sClient.Get(context.TODO(),
 			client.ObjectKeyFromObject(storage), storage)).To(Succeed())
@@ -289,12 +288,12 @@ func verifyClientMount(storage *nnfv1alpha1.NnfStorage, storageProfile *nnfv1alp
 	}).Should(Succeed())
 
 	mountPath := "/mnt/nnf/12345-0/"
-	access := &nnfv1alpha1.NnfAccess{
+	access := &nnfv1alpha2.NnfAccess{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "nnf-access-test-access-" + storage.Spec.FileSystemType,
 			Namespace: corev1.NamespaceDefault,
 		},
-		Spec: nnfv1alpha1.NnfAccessSpec{
+		Spec: nnfv1alpha2.NnfAccessSpec{
 			DesiredState:  "mounted",
 			TeardownState: dwsv1alpha2.StatePreRun,
@@ -305,7 +304,7 @@ func verifyClientMount(storage *nnfv1alpha1.NnfStorage, storageProfile *nnfv1alp
 			MountPathPrefix: mountPath,
 
 			StorageReference: corev1.ObjectReference{
-				Kind:      reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name(),
+				Kind:      reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name(),
 				Name:      storage.Name,
 				Namespace: storage.Namespace,
 			},
diff --git a/internal/controller/nnf_clientmount_controller.go b/internal/controller/nnf_clientmount_controller.go
index 08107d1b3..e17e12f7b 100644
--- a/internal/controller/nnf_clientmount_controller.go
+++ b/internal/controller/nnf_clientmount_controller.go
@@ -41,7 +41,7 @@ import (
 	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
 	"github.com/DataWorkflowServices/dws/utils/updater"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 	"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics"
 )
 
@@ -285,8 +285,8 @@ func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount
 // fakeNnfNodeStorage creates an NnfNodeStorage resource filled in with only the fields
 // that are necessary to mount the file system. This is done to reduce the API server load
 // because the compute nodes don't need to Get() the actual NnfNodeStorage.
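+// The stand-in resource carries only the name/namespace taken from the ClientMount's
+// device reference, plus the inherited owner and directive-index labels set below.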
-func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, index int) (*nnfv1alpha1.NnfNodeStorage, error) {
-	nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{
+func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, index int) (*nnfv1alpha2.NnfNodeStorage, error) {
+	nnfNodeStorage := &nnfv1alpha2.NnfNodeStorage{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      clientMount.Spec.Mounts[index].Device.DeviceReference.ObjectReference.Name,
 			Namespace: clientMount.Spec.Mounts[index].Device.DeviceReference.ObjectReference.Namespace,
@@ -298,7 +298,7 @@ func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clien
 	// labels that are important for doing the mount are there and correct
 	dwsv1alpha2.InheritParentLabels(nnfNodeStorage, clientMount)
 	labels := nnfNodeStorage.GetLabels()
-	labels[nnfv1alpha1.DirectiveIndexLabel] = getTargetDirectiveIndexLabel(clientMount)
+	labels[nnfv1alpha2.DirectiveIndexLabel] = getTargetDirectiveIndexLabel(clientMount)
 	labels[dwsv1alpha2.OwnerUidLabel] = getTargetOwnerUIDLabel(clientMount)
 	nnfNodeStorage.SetLabels(labels)
diff --git a/internal/controller/nnf_lustre_mgt_controller.go b/internal/controller/nnf_lustre_mgt_controller.go
index 4bb7297e3..fe47cc511 100644
--- a/internal/controller/nnf_lustre_mgt_controller.go
+++ b/internal/controller/nnf_lustre_mgt_controller.go
@@ -39,7 +39,7 @@ import (
 	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
 	"github.com/DataWorkflowServices/dws/utils/updater"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 	"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics"
 	"github.com/NearNodeFlash/nnf-sos/pkg/command"
 )
@@ -85,7 +85,7 @@ func (r *NnfLustreMGTReconciler) Reconcile(ctx context.Context, req ctrl.Request
 	metrics.NnfLustreMGTReconcilesTotal.Inc()
 
-	nnfLustreMgt := &nnfv1alpha1.NnfLustreMGT{}
+	nnfLustreMgt := &nnfv1alpha2.NnfLustreMGT{}
 	if err := r.Get(ctx, req.NamespacedName, nnfLustreMgt); err != nil {
 		// ignore not-found errors, since they can't be fixed by an immediate
 		// requeue (we'll need to wait for a new notification), and we can get them
@@ -93,7 +93,7 @@ func (r *NnfLustreMGTReconciler) Reconcile(ctx context.Context, req ctrl.Request
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfLustreMGTStatus](nnfLustreMgt)
+	statusUpdater := updater.NewStatusUpdater[*nnfv1alpha2.NnfLustreMGTStatus](nnfLustreMgt)
 	defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }()
 	defer func() { nnfLustreMgt.Status.SetResourceErrorAndLog(err, log) }()
@@ -224,7 +224,7 @@ func incrementFsName(fsname string) string {
 	return string(incrementRuneList(runeList, 'a', 'z'))
 }
 
-func isFsNameBlackListed(nnfLustreMgt *nnfv1alpha1.NnfLustreMGT, fsname string) bool {
+func isFsNameBlackListed(nnfLustreMgt *nnfv1alpha2.NnfLustreMGT, fsname string) bool {
 	// Check the blacklist
 	for _, blackListedFsName := range nnfLustreMgt.Spec.FsNameBlackList {
 		if fsname == blackListedFsName {
@@ -237,7 +237,7 @@ func isFsNameBlackListed(nnfLustreMgt *nnfv1alpha1.NnfLustreMGT, fsname string)
 // SetFsNameNext sets the Status.FsNameNext field to the next available fsname. It also
 // updates the FsNameStartReference field in the configmap if needed.
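+// fsnames advance through incrementFsName() over the range 'a'..'z', wrapping back
+// around once "zzzzzzzz" is reached, and blacklisted names are skipped.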
-func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt *nnfv1alpha1.NnfLustreMGT, fsname string) (*ctrl.Result, error) {
+func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt *nnfv1alpha2.NnfLustreMGT, fsname string) (*ctrl.Result, error) {
 	// Find the next available fsname that isn't blacklisted
 	for {
 		fsname = incrementFsName(fsname)
@@ -286,7 +286,7 @@ func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt
 // HandleNewClaims looks for any new claims in Spec.ClaimList and assigns them
 // an fsname
-func (r *NnfLustreMGTReconciler) HandleNewClaims(ctx context.Context, nnfLustreMgt *nnfv1alpha1.NnfLustreMGT) (*ctrl.Result, error) {
+func (r *NnfLustreMGTReconciler) HandleNewClaims(ctx context.Context, nnfLustreMgt *nnfv1alpha2.NnfLustreMGT) (*ctrl.Result, error) {
 	claimMap := map[corev1.ObjectReference]string{}
 	for _, claim := range nnfLustreMgt.Status.ClaimList {
 		claimMap[claim.Reference] = claim.FsName
@@ -304,7 +304,7 @@ func (r *NnfLustreMGTReconciler) HandleNewClaims(ctx context.Context, nnfLustreM
 			return result, nil
 		}
 
-		newClaim := nnfv1alpha1.NnfLustreMGTStatusClaim{
+		newClaim := nnfv1alpha2.NnfLustreMGTStatusClaim{
 			Reference: reference,
 			FsName:    fsnameNext,
 		}
@@ -320,7 +320,7 @@ func (r *NnfLustreMGTReconciler) HandleNewClaims(ctx context.Context, nnfLustreM
 // RemoveOldClaims removes any old entries from the Status.ClaimList and erases the fsname from
 // the MGT if necessary.
-func (r *NnfLustreMGTReconciler) RemoveOldClaims(ctx context.Context, nnfLustreMgt *nnfv1alpha1.NnfLustreMGT) error {
+func (r *NnfLustreMGTReconciler) RemoveOldClaims(ctx context.Context, nnfLustreMgt *nnfv1alpha2.NnfLustreMGT) error {
 	claimMap := map[corev1.ObjectReference]bool{}
 	for _, reference := range nnfLustreMgt.Spec.ClaimList {
 		claimMap[reference] = true
@@ -341,7 +341,7 @@ func (r *NnfLustreMGTReconciler) RemoveOldClaims(ctx context.Context, nnfLustreM
 	return nil
 }
 
-func (r *NnfLustreMGTReconciler) EraseOldFsName(nnfLustreMgt *nnfv1alpha1.NnfLustreMGT, fsname string) error {
+func (r *NnfLustreMGTReconciler) EraseOldFsName(nnfLustreMgt *nnfv1alpha2.NnfLustreMGT, fsname string) error {
 	log := r.Log.WithValues("NnfLustreMGT", client.ObjectKeyFromObject(nnfLustreMgt))
 
 	if os.Getenv("ENVIRONMENT") == "kind" {
@@ -386,7 +386,7 @@ func filterByNnfSystemNamespace() predicate.Predicate {
 func (r *NnfLustreMGTReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	builder := ctrl.NewControllerManagedBy(mgr).
 		WithOptions(controller.Options{MaxConcurrentReconciles: 1}).
-		For(&nnfv1alpha1.NnfLustreMGT{})
+		For(&nnfv1alpha2.NnfLustreMGT{})
 
 	switch r.ControllerType {
 	case ControllerRabbit:
diff --git a/internal/controller/nnf_lustre_mgt_controller_test.go b/internal/controller/nnf_lustre_mgt_controller_test.go
index 1086d6be6..9393be795 100644
--- a/internal/controller/nnf_lustre_mgt_controller_test.go
+++ b/internal/controller/nnf_lustre_mgt_controller_test.go
@@ -30,18 +30,18 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 )
 
 var _ = Describe("NnfLustreMGT Controller Test", func() {
 	It("Verifies a single fsname consumer", func() {
-		nnfLustreMgt := &nnfv1alpha1.NnfLustreMGT{
+		nnfLustreMgt := &nnfv1alpha2.NnfLustreMGT{
 			TypeMeta: metav1.TypeMeta{},
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "test-mgt",
 				Namespace: corev1.NamespaceDefault,
 			},
-			Spec: nnfv1alpha1.NnfLustreMGTSpec{
+			Spec: nnfv1alpha2.NnfLustreMGTSpec{
 				Addresses:   []string{"1.1.1.1@tcp"},
 				FsNameStart: "bbbbbbbb",
 			},
@@ -87,13 +87,13 @@ var _ = Describe("NnfLustreMGT Controller Test", func() {
 	})
 
 	It("Verifies two fsname consumers with fsname wrap", func() {
-		nnfLustreMgt := &nnfv1alpha1.NnfLustreMGT{
+		nnfLustreMgt := &nnfv1alpha2.NnfLustreMGT{
 			TypeMeta: metav1.TypeMeta{},
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "test-mgt",
 				Namespace: corev1.NamespaceDefault,
 			},
-			Spec: nnfv1alpha1.NnfLustreMGTSpec{
+			Spec: nnfv1alpha2.NnfLustreMGTSpec{
 				Addresses:   []string{"1.1.1.1@tcp"},
 				FsNameStart: "zzzzzzzz",
 			},
@@ -164,13 +164,13 @@ var _ = Describe("NnfLustreMGT Controller Test", func() {
 		}
 		Expect(k8sClient.Create(context.TODO(), configMap)).To(Succeed())
 
-		nnfLustreMgt := &nnfv1alpha1.NnfLustreMGT{
+		nnfLustreMgt := &nnfv1alpha2.NnfLustreMGT{
 			TypeMeta: metav1.TypeMeta{},
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "test-mgt",
 				Namespace: corev1.NamespaceDefault,
 			},
-			Spec: nnfv1alpha1.NnfLustreMGTSpec{
+			Spec: nnfv1alpha2.NnfLustreMGTSpec{
 				Addresses:   []string{"1.1.1.1@tcp"},
 				FsNameStart: "bbbbbbbb",
 				FsNameStartReference: corev1.ObjectReference{
diff --git a/internal/controller/nnf_node_block_storage_controller.go b/internal/controller/nnf_node_block_storage_controller.go
index 7e6ba2032..b50c9e9c2 100644
--- a/internal/controller/nnf_node_block_storage_controller.go
+++ b/internal/controller/nnf_node_block_storage_controller.go
@@ -39,8 +39,15 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
 
+	nnfec "github.com/NearNodeFlash/nnf-ec/pkg"
 	ec "github.com/NearNodeFlash/nnf-ec/pkg/ec"
+	nnfevent "github.com/NearNodeFlash/nnf-ec/pkg/manager-event"
+	msgreg "github.com/NearNodeFlash/nnf-ec/pkg/manager-message-registry/registries"
 	nnf "github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf"
 	nnfnvme "github.com/NearNodeFlash/nnf-ec/pkg/manager-nvme"
 	openapi "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/common"
@@ -48,7 +55,7 @@ import (
 	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
 	"github.com/DataWorkflowServices/dws/utils/updater"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 	"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics"
"github.com/NearNodeFlash/nnf-sos/pkg/blockdevice/nvme" ) @@ -68,17 +75,53 @@ type NnfNodeBlockStorageReconciler struct { Scheme *kruntime.Scheme SemaphoreForStart chan struct{} SemaphoreForDone chan struct{} + Options *nnfec.Options types.NamespacedName sync.Mutex + Events chan event.GenericEvent started bool reconcilerAwake bool } +// EventHandler implements event.Subscription. Every Upstream or Downstream event triggers a watch +// on all the NnfNodeBlockStorages. This is needed to create the StorageGroup for a compute node that +// was powered off when the Access list was updated. +func (r *NnfNodeBlockStorageReconciler) EventHandler(e nnfevent.Event) error { + log := r.Log.WithValues("nnf-ec event", "node-up/node-down") + + // Upstream link events + upstreamLinkEstablished := e.Is(msgreg.UpstreamLinkEstablishedFabric("", "")) || e.Is(msgreg.DegradedUpstreamLinkEstablishedFabric("", "")) + upstreamLinkDropped := e.Is(msgreg.UpstreamLinkDroppedFabric("", "")) + + // Downstream link events + downstreamLinkEstablished := e.Is(msgreg.DownstreamLinkEstablishedFabric("", "")) || e.Is(msgreg.DegradedDownstreamLinkEstablishedFabric("", "")) + downstreamLinkDropped := e.Is(msgreg.DownstreamLinkDroppedFabric("", "")) + + // Check if the event is one that we care about + if !upstreamLinkEstablished && !upstreamLinkDropped && !downstreamLinkEstablished && !downstreamLinkDropped { + return nil + } + + log.Info("triggering watch") + + r.Events <- event.GenericEvent{Object: &nnfv1alpha2.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nnf-ec-event", + Namespace: "nnf-ec-event", + }, + }} + + return nil +} + func (r *NnfNodeBlockStorageReconciler) Start(ctx context.Context) error { log := r.Log.WithValues("State", "Start") + // Subscribe to the NNF Event Manager + nnfevent.EventManager.Subscribe(r) + <-r.SemaphoreForStart log.Info("Ready to start") @@ -116,7 +159,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. metrics.NnfNodeBlockStorageReconcilesTotal.Inc() - nodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{} + nodeBlockStorage := &nnfv1alpha2.NnfNodeBlockStorage{} if err := r.Get(ctx, req.NamespacedName, nodeBlockStorage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -125,7 +168,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. } // Ensure the NNF Storage Service is running prior to taking any action. - ss := nnf.NewDefaultStorageService() + ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes()) storageService := &sf.StorageServiceV150StorageService{} if err := ss.StorageServiceIdGet(ss.Id(), storageService); err != nil { return ctrl.Result{}, err @@ -135,7 +178,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{RequeueAfter: 1 * time.Second}, nil } - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfNodeBlockStorageStatus](nodeBlockStorage) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha2.NnfNodeBlockStorageStatus](nodeBlockStorage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { nodeBlockStorage.Status.SetResourceErrorAndLog(err, log) }() @@ -183,9 +226,9 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. // Initialize the status section with empty allocation statuses. 
 	if len(nodeBlockStorage.Status.Allocations) == 0 {
-		nodeBlockStorage.Status.Allocations = make([]nnfv1alpha1.NnfNodeBlockStorageAllocationStatus, len(nodeBlockStorage.Spec.Allocations))
+		nodeBlockStorage.Status.Allocations = make([]nnfv1alpha2.NnfNodeBlockStorageAllocationStatus, len(nodeBlockStorage.Spec.Allocations))
 		for i := range nodeBlockStorage.Status.Allocations {
-			nodeBlockStorage.Status.Allocations[i].Accesses = make(map[string]nnfv1alpha1.NnfNodeBlockStorageAccessStatus)
+			nodeBlockStorage.Status.Allocations[i].Accesses = make(map[string]nnfv1alpha2.NnfNodeBlockStorageAccessStatus)
 		}
 
 		return ctrl.Result{}, nil
@@ -244,10 +287,10 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl.
 	return ctrl.Result{}, nil
 }
 
-func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1alpha1.NnfNodeBlockStorage, index int) (*ctrl.Result, error) {
+func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1alpha2.NnfNodeBlockStorage, index int) (*ctrl.Result, error) {
 	log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace})
 
-	ss := nnf.NewDefaultStorageService()
+	ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes())
 	nvmeSS := nnfnvme.NewDefaultStorageService()
 
 	allocationStatus := &nodeBlockStorage.Status.Allocations[index]
@@ -264,7 +307,7 @@ func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1a
 	}
 
 	if len(allocationStatus.Devices) == 0 {
-		allocationStatus.Devices = make([]nnfv1alpha1.NnfNodeBlockStorageDeviceStatus, len(vc.Members))
+		allocationStatus.Devices = make([]nnfv1alpha2.NnfNodeBlockStorageDeviceStatus, len(vc.Members))
 	}
 
 	if len(allocationStatus.Devices) != len(vc.Members) {
@@ -302,9 +345,9 @@ func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1a
 	return nil, nil
 }
 
-func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, nodeBlockStorage *nnfv1alpha1.NnfNodeBlockStorage, index int) (*ctrl.Result, error) {
+func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, nodeBlockStorage *nnfv1alpha2.NnfNodeBlockStorage, index int) (*ctrl.Result, error) {
 	log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace})
-	ss := nnf.NewDefaultStorageService()
+	ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes())
 
 	allocationStatus := &nodeBlockStorage.Status.Allocations[index]
@@ -380,6 +423,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n
 			} else {
 				// The kind environment doesn't support endpoints beyond the Rabbit
 				if os.Getenv("ENVIRONMENT") == "kind" && endpointID != os.Getenv("RABBIT_NODE") {
+					allocationStatus.Accesses[nodeName] = nnfv1alpha2.NnfNodeBlockStorageAccessStatus{StorageGroupId: "fake-storage-group"}
 					continue
 				}
 
@@ -389,7 +433,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n
 				}
 
 				// Skip the endpoints that are not ready
-				if nnfv1alpha1.StaticResourceStatus(endPoint.Status) != nnfv1alpha1.ResourceReady {
+				if nnfv1alpha2.StaticResourceStatus(endPoint.Status) != nnfv1alpha2.ResourceReady {
 					continue
 				}
 
@@ -399,13 +443,13 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n
 				}
 
 				if allocationStatus.Accesses == nil {
-					allocationStatus.Accesses = make(map[string]nnfv1alpha1.NnfNodeBlockStorageAccessStatus)
+					allocationStatus.Accesses =
 						make(map[string]nnfv1alpha2.NnfNodeBlockStorageAccessStatus)
 				}
 
 				// If the access status doesn't exist then we just created the resource. Save the ID in the NnfNodeBlockStorage
 				if _, ok := allocationStatus.Accesses[nodeName]; !ok {
 					log.Info("Created storage group", "Id", storageGroupId)
-					allocationStatus.Accesses[nodeName] = nnfv1alpha1.NnfNodeBlockStorageAccessStatus{StorageGroupId: sg.Id}
+					allocationStatus.Accesses[nodeName] = nnfv1alpha2.NnfNodeBlockStorageAccessStatus{StorageGroupId: sg.Id}
 				}
 
 				// The device paths are discovered below. This is only relevant for the Rabbit node access
@@ -462,10 +506,10 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n
 }
 
-func (r *NnfNodeBlockStorageReconciler) deleteStorage(nodeBlockStorage *nnfv1alpha1.NnfNodeBlockStorage, index int) (*ctrl.Result, error) {
+func (r *NnfNodeBlockStorageReconciler) deleteStorage(nodeBlockStorage *nnfv1alpha2.NnfNodeBlockStorage, index int) (*ctrl.Result, error) {
 	log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace})
 
-	ss := nnf.NewDefaultStorageService()
+	ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes())
 
 	storagePoolID := getStoragePoolID(nodeBlockStorage, index)
 	log.Info("Deleting storage pool", "Id", storagePoolID)
@@ -487,7 +531,7 @@ func (r *NnfNodeBlockStorageReconciler) deleteStorage(nodeBlockStorage *nnfv1alp
 	return nil, nil
 }
 
-func getStoragePoolID(nodeBlockStorage *nnfv1alpha1.NnfNodeBlockStorage, index int) string {
+func getStoragePoolID(nodeBlockStorage *nnfv1alpha2.NnfNodeBlockStorage, index int) string {
 	return fmt.Sprintf("%s-%d", nodeBlockStorage.Name, index)
 }
 
@@ -587,14 +631,57 @@ func (r *NnfNodeBlockStorageReconciler) deleteStorageGroup(ss nnf.StorageService
 	return ss.StorageServiceIdStorageGroupIdDelete(ss.Id(), id)
 }
 
+// Enqueue all the NnfNodeBlockStorage resources after an nnf-ec node-up/node-down event. If we
+// can't List() the NnfNodeBlockStorages, trigger the watch again after 10 seconds.
+func (r *NnfNodeBlockStorageReconciler) NnfEcEventEnqueueHandler(ctx context.Context, o client.Object) []reconcile.Request {
+	log := r.Log.WithValues("Event", "Enqueue")
+
+	requests := []reconcile.Request{}
+
+	// Find all the NnfNodeBlockStorage resources for this Rabbit so we can reconcile them.
+	listOptions := []client.ListOption{
+		client.InNamespace(os.Getenv("NNF_NODE_NAME")),
+	}
+
+	nnfNodeBlockStorageList := &nnfv1alpha2.NnfNodeBlockStorageList{}
+	if err := r.List(context.TODO(), nnfNodeBlockStorageList, listOptions...); err != nil {
+		log.Error(err, "Could not list block storages")
+
+		// Wait ten seconds and trigger the watch again to retry
+		go func() {
+			time.Sleep(time.Second * 10)
+
+			log.Info("triggering watch after List() error")
+			r.Events <- event.GenericEvent{Object: &nnfv1alpha2.NnfNodeBlockStorage{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "nnf-ec-event",
+					Namespace: "nnf-ec-event",
+				},
+			}}
+		}()
+
+		return requests
+	}
+
+	for _, nnfNodeBlockStorage := range nnfNodeBlockStorageList.Items {
+		requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: nnfNodeBlockStorage.GetName(), Namespace: nnfNodeBlockStorage.GetNamespace()}})
+	}
+
+	log.Info("Enqueuing resources", "requests", requests)
+
+	return requests
+}
+
 // SetupWithManager sets up the controller with the Manager.
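+// The Events channel is fed by EventHandler() above; the WatchesRawSource below turns
+// those nnf-ec fabric events into reconciles of every NnfNodeBlockStorage on this
+// node via NnfEcEventEnqueueHandler().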
 func (r *NnfNodeBlockStorageReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	if err := mgr.Add(r); err != nil {
 		return err
 	}
 
+	// nnf-ec is not thread safe, so we are limited to a single reconcile thread.
 	return ctrl.NewControllerManagedBy(mgr).
 		WithOptions(controller.Options{MaxConcurrentReconciles: 1}).
-		For(&nnfv1alpha1.NnfNodeBlockStorage{}).
+		For(&nnfv1alpha2.NnfNodeBlockStorage{}).
+		WatchesRawSource(&source.Channel{Source: r.Events}, handler.EnqueueRequestsFromMapFunc(r.NnfEcEventEnqueueHandler)).
 		Complete(r)
 }
diff --git a/internal/controller/nnf_node_controller.go b/internal/controller/nnf_node_controller.go
index 3c35d09bf..71e5773e7 100644
--- a/internal/controller/nnf_node_controller.go
+++ b/internal/controller/nnf_node_controller.go
@@ -38,11 +38,13 @@ import (
 	"k8s.io/client-go/util/retry"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	nnfec "github.com/NearNodeFlash/nnf-ec/pkg"
-	event "github.com/NearNodeFlash/nnf-ec/pkg/manager-event"
+	nnfevent "github.com/NearNodeFlash/nnf-ec/pkg/manager-event"
 	msgreg "github.com/NearNodeFlash/nnf-ec/pkg/manager-message-registry/registries"
 	nnf "github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf"
 	nvme "github.com/NearNodeFlash/nnf-ec/pkg/manager-nvme"
@@ -51,7 +53,7 @@ import (
 	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
 	"github.com/DataWorkflowServices/dws/utils/updater"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 	"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics"
 )
 
@@ -71,6 +73,7 @@ type NnfNodeReconciler struct {
 	types.NamespacedName
 	sync.Mutex
 
+	Events          chan event.GenericEvent
 	started         bool
 	reconcilerAwake bool
 }
@@ -115,7 +118,7 @@ func (r *NnfNodeReconciler) Start(ctx context.Context) error {
 			log.Info("Created Namespace")
 		}
 
-		node := &nnfv1alpha1.NnfNode{}
+		node := &nnfv1alpha2.NnfNode{}
 		if err := r.Get(ctx, r.NamespacedName, node); err != nil {
 
 			if !errors.IsNotFound(err) {
@@ -136,7 +139,7 @@ func (r *NnfNodeReconciler) Start(ctx context.Context) error {
 		} else {
 			err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 
-				node := &nnfv1alpha1.NnfNode{}
+				node := &nnfv1alpha2.NnfNode{}
 				if err := r.Get(ctx, r.NamespacedName, node); err != nil {
 					return err
 				}
@@ -152,8 +155,8 @@ func (r *NnfNodeReconciler) Start(ctx context.Context) error {
 				}
 
 				// Mark the node's status as starting
-				if node.Status.Status != nnfv1alpha1.ResourceStarting {
-					node.Status.Status = nnfv1alpha1.ResourceStarting
+				if node.Status.Status != nnfv1alpha2.ResourceStarting {
+					node.Status.Status = nnfv1alpha2.ResourceStarting
 
 					if err := r.Status().Update(ctx, node); err != nil {
 						return err
@@ -168,33 +171,10 @@ func (r *NnfNodeReconciler) Start(ctx context.Context) error {
 				return err
 			}
 		}
-
-		storage := &dwsv1alpha2.Storage{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      r.Namespace,
-				Namespace: corev1.NamespaceDefault,
-			},
-		}
-
-		if err := r.Get(ctx, client.ObjectKeyFromObject(storage), storage); err != nil {
-			log := r.Log.WithValues("resource", client.ObjectKeyFromObject(storage))
-
-			if !errors.IsNotFound(err) {
-				log.Error(err, "get storage resource failed")
-				return err
-			}
-
-			if err := r.Create(ctx, storage); err != nil {
-				log.Error(err, "create storage resource failed")
-				return err
-			}
-
-			log.Info("created storage resource")
-		}
 	}
 
 	// Subscribe to the NNF Event Manager
-	event.EventManager.Subscribe(r)
+	nnfevent.EventManager.Subscribe(r)
 
 	r.Lock()
 	r.started = true
@@ -207,24 +187,31 @@ func (r *NnfNodeReconciler) Start(ctx context.Context) error {
 
 // EventHandler implements event.Subscription. Every Upstream or Downstream event runs the reconciler
 // so all the NNF Node server/drive status stays current.
-func (r *NnfNodeReconciler) EventHandler(e event.Event) error {
+func (r *NnfNodeReconciler) EventHandler(e nnfevent.Event) error {
+	log := r.Log.WithValues("nnf-ec event", "node-up/node-down")
 
 	// Upstream link events
-	linkEstablished := e.Is(msgreg.UpstreamLinkEstablishedFabric("", "")) || e.Is(msgreg.DegradedUpstreamLinkEstablishedFabric("", ""))
-	linkDropped := e.Is(msgreg.UpstreamLinkDroppedFabric("", ""))
-
-	if linkEstablished || linkDropped {
-		r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: r.NamespacedName})
-	}
+	upstreamLinkEstablished := e.Is(msgreg.UpstreamLinkEstablishedFabric("", "")) || e.Is(msgreg.DegradedUpstreamLinkEstablishedFabric("", ""))
+	upstreamLinkDropped := e.Is(msgreg.UpstreamLinkDroppedFabric("", ""))
 
 	// Downstream link events
-	linkEstablished = e.Is(msgreg.DownstreamLinkEstablishedFabric("", "")) || e.Is(msgreg.DegradedDownstreamLinkEstablishedFabric("", ""))
-	linkDropped = e.Is(msgreg.DownstreamLinkDroppedFabric("", ""))
+	downstreamLinkEstablished := e.Is(msgreg.DownstreamLinkEstablishedFabric("", "")) || e.Is(msgreg.DegradedDownstreamLinkEstablishedFabric("", ""))
+	downstreamLinkDropped := e.Is(msgreg.DownstreamLinkDroppedFabric("", ""))
 
-	if linkEstablished || linkDropped {
-		r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: r.NamespacedName})
+	// Check if the event is one that we care about
+	if !upstreamLinkEstablished && !upstreamLinkDropped && !downstreamLinkEstablished && !downstreamLinkDropped {
+		return nil
 	}
 
+	log.Info("triggering watch")
+
+	r.Events <- event.GenericEvent{Object: &nnfv1alpha2.NnfNode{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      r.NamespacedName.Name,
+			Namespace: r.NamespacedName.Namespace,
+		},
+	}}
+
 	return nil
 }
 
@@ -248,7 +235,7 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
 	metrics.NnfNodeReconcilesTotal.Inc()
 
-	node := &nnfv1alpha1.NnfNode{}
+	node := &nnfv1alpha2.NnfNode{}
 	if err := r.Get(ctx, req.NamespacedName, node); err != nil {
 		// ignore not-found errors, since they can't be fixed by an immediate
 		// requeue (we'll need to wait for a new notification), and we can get them
@@ -257,12 +244,12 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
 	}
 
 	// Prepare to update the node's status
-	statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfNodeStatus](node)
+	statusUpdater := updater.NewStatusUpdater[*nnfv1alpha2.NnfNodeStatus](node)
 	defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }()
 
 	// Access the default storage service running in the NNF Element
 	// Controller. Check for any State/Health change.
-	ss := nnf.NewDefaultStorageService()
+	ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes())
 	storageService := &sf.StorageServiceV150StorageService{}
 
 	if err := ss.StorageServiceIdGet(ss.Id(), storageService); err != nil {
@@ -270,8 +257,8 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
 		return ctrl.Result{}, err
 	}
 
-	node.Status.Status = nnfv1alpha1.ResourceStatus(storageService.Status)
-	node.Status.Health = nnfv1alpha1.ResourceHealth(storageService.Status)
+	node.Status.Status = nnfv1alpha2.ResourceStatus(storageService.Status)
+	node.Status.Health = nnfv1alpha2.ResourceHealth(storageService.Status)
 
 	if storageService.Status.State != sf.ENABLED_RST {
 		return ctrl.Result{RequeueAfter: 1 * time.Second}, nil
@@ -287,7 +274,7 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
 	node.Status.Capacity = capacitySource.ProvidedCapacity.Data.GuaranteedBytes
 	node.Status.CapacityAllocated = capacitySource.ProvidedCapacity.Data.AllocatedBytes
 
-	if err := updateServers(node, log); err != nil {
+	if err := r.updateServers(node, log); err != nil {
 		return ctrl.Result{}, err
 	}
 
@@ -361,28 +348,28 @@ func (r *NnfNodeReconciler) createNamespace() *corev1.Namespace {
 	}
 }
 
-func (r *NnfNodeReconciler) createNode() *nnfv1alpha1.NnfNode {
-	return &nnfv1alpha1.NnfNode{
+func (r *NnfNodeReconciler) createNode() *nnfv1alpha2.NnfNode {
+	return &nnfv1alpha2.NnfNode{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      r.Name,
 			Namespace: r.Namespace,
 		},
-		Spec: nnfv1alpha1.NnfNodeSpec{
+		Spec: nnfv1alpha2.NnfNodeSpec{
 			Name:  r.Namespace,               // Note the conversion here from namespace to name, each NNF Node is given a unique namespace, which then becomes how the NLC is controlled.
 			Pod:   os.Getenv("NNF_POD_NAME"), // Providing the podname gives users quick means to query the pod for a particular NNF Node
-			State: nnfv1alpha1.ResourceEnable,
+			State: nnfv1alpha2.ResourceEnable,
 		},
-		Status: nnfv1alpha1.NnfNodeStatus{
-			Status:   nnfv1alpha1.ResourceStarting,
+		Status: nnfv1alpha2.NnfNodeStatus{
+			Status:   nnfv1alpha2.ResourceStarting,
 			Capacity: 0,
 		},
 	}
}
 
 // Update the Servers status of the NNF Node if necessary
-func updateServers(node *nnfv1alpha1.NnfNode, log logr.Logger) error {
+func (r *NnfNodeReconciler) updateServers(node *nnfv1alpha2.NnfNode, log logr.Logger) error {
 
-	ss := nnf.NewDefaultStorageService()
+	ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes())
 
 	// Update the server statuses with the current values
 	serverEndpointCollection := &sf.EndpointCollectionEndpointCollection{}
@@ -392,7 +379,7 @@ func updateServers(node *nnfv1alpha1.NnfNode, log logr.Logger) error {
 	}
 
 	if len(node.Status.Servers) < len(serverEndpointCollection.Members) {
-		node.Status.Servers = make([]nnfv1alpha1.NnfServerStatus, len(serverEndpointCollection.Members))
+		node.Status.Servers = make([]nnfv1alpha2.NnfServerStatus, len(serverEndpointCollection.Members))
 	}
 
 	// Iterate over the server endpoints to ensure we've reflected
@@ -406,11 +393,11 @@ func updateServers(node *nnfv1alpha1.NnfNode, log logr.Logger) error {
 			return err
 		}
 
-		node.Status.Servers[idx].NnfResourceStatus = nnfv1alpha1.NnfResourceStatus{
+		node.Status.Servers[idx].NnfResourceStatus = nnfv1alpha2.NnfResourceStatus{
 			ID:     serverEndpoint.Id,
 			Name:   serverEndpoint.Name,
-			Status: nnfv1alpha1.ResourceStatus(serverEndpoint.Status),
-			Health: nnfv1alpha1.ResourceHealth(serverEndpoint.Status),
+			Status: nnfv1alpha2.ResourceStatus(serverEndpoint.Status),
+			Health:
 				nnfv1alpha2.ResourceHealth(serverEndpoint.Status),
 		}
 	}
 
@@ -418,7 +405,7 @@ func updateServers(node *nnfv1alpha1.NnfNode, log logr.Logger) error {
 }
 
 // Update the Drives status of the NNF Node if necessary
-func updateDrives(node *nnfv1alpha1.NnfNode, log logr.Logger) error {
+func updateDrives(node *nnfv1alpha2.NnfNode, log logr.Logger) error {
 	storageService := nvme.NewDefaultStorageService()
 
 	storageCollection := &sf.StorageCollectionStorageCollection{}
@@ -428,7 +415,7 @@ func updateDrives(node *nnfv1alpha1.NnfNode, log logr.Logger) error {
 	}
 
 	if len(node.Status.Drives) < len(storageCollection.Members) {
-		node.Status.Drives = make([]nnfv1alpha1.NnfDriveStatus, len(storageCollection.Members))
+		node.Status.Drives = make([]nnfv1alpha2.NnfDriveStatus, len(storageCollection.Members))
 	}
 
 	// Iterate over the storage devices and controllers to ensure we've reflected
@@ -444,11 +431,11 @@ func updateDrives(node *nnfv1alpha1.NnfNode, log logr.Logger) error {
 		}
 
 		drive.Slot = fmt.Sprintf("%d", storage.Location.PartLocation.LocationOrdinalValue)
-		drive.NnfResourceStatus = nnfv1alpha1.NnfResourceStatus{
+		drive.NnfResourceStatus = nnfv1alpha2.NnfResourceStatus{
 			ID:     storage.Id,
 			Name:   storage.Name,
-			Status: nnfv1alpha1.ResourceStatus(storage.Status),
-			Health: nnfv1alpha1.ResourceHealth(storage.Status),
+			Status: nnfv1alpha2.ResourceStatus(storage.Status),
+			Health: nnfv1alpha2.ResourceHealth(storage.Status),
 		}
 
 		if storage.Status.State == sf.ENABLED_RST {
@@ -512,8 +499,9 @@ func (r *NnfNodeReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	// There can be only one NnfNode resource for this controller to
 	// manage, so we don't set MaxConcurrentReconciles.
 	return ctrl.NewControllerManagedBy(mgr).
-		For(&nnfv1alpha1.NnfNode{}).
+		For(&nnfv1alpha2.NnfNode{}).
 		Owns(&corev1.Namespace{}). // The node will create a namespace for itself, so it can watch changes to the NNF Node custom resource
 		Watches(&dwsv1alpha2.SystemConfiguration{}, handler.EnqueueRequestsFromMapFunc(systemConfigurationMapFunc)).
+		WatchesRawSource(&source.Channel{Source: r.Events}, &handler.EnqueueRequestForObject{}).
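+		// Fabric link events published by EventHandler() arrive on r.Events and
+		// requeue this NnfNode so the server/drive status stays current.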
 		Complete(r)
 }
diff --git a/internal/controller/nnf_node_ec_data_controller.go b/internal/controller/nnf_node_ec_data_controller.go
index adde2ceef..3165824ad 100644
--- a/internal/controller/nnf_node_ec_data_controller.go
+++ b/internal/controller/nnf_node_ec_data_controller.go
@@ -34,7 +34,7 @@ import (
 	nnfec "github.com/NearNodeFlash/nnf-ec/pkg"
 	ec "github.com/NearNodeFlash/nnf-ec/pkg/ec"
 	"github.com/NearNodeFlash/nnf-ec/pkg/persistent"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 	"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics"
 	"github.com/NearNodeFlash/nnf-sos/pkg/blockdevice"
 	"github.com/go-logr/logr"
@@ -71,14 +71,14 @@ func (r *NnfNodeECDataReconciler) Start(ctx context.Context) error {
 	if !testing {
 		// Create the resource if necessary
-		data := nnfv1alpha1.NnfNodeECData{}
+		data := nnfv1alpha2.NnfNodeECData{}
 		if err := r.Get(ctx, r.NamespacedName, &data); err != nil {
 
 			if !errors.IsNotFound(err) {
 				return err
 			}
 
-			data := nnfv1alpha1.NnfNodeECData{
+			data := nnfv1alpha2.NnfNodeECData{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      r.Name,
 					Namespace: r.Namespace,
@@ -175,7 +175,7 @@ func (*crdPersistentStorageInterface) Close() error {
 }
 
 func (psi *crdPersistentStorageInterface) View(fn func(persistent.PersistentStorageTransactionApi) error) error {
-	data := nnfv1alpha1.NnfNodeECData{}
+	data := nnfv1alpha2.NnfNodeECData{}
 	if err := psi.reconciler.Get(context.TODO(), psi.reconciler.NamespacedName, &data); err != nil {
 		return err
 	}
@@ -187,17 +187,17 @@ func (psi *crdPersistentStorageInterface) Update(fn func(persistent.PersistentSt
 
 Retry:
 
-	data := nnfv1alpha1.NnfNodeECData{}
+	data := nnfv1alpha2.NnfNodeECData{}
 	if err := psi.reconciler.Get(context.TODO(), psi.reconciler.NamespacedName, &data); err != nil {
 		return err
 	}
 
 	if data.Status.Data == nil {
-		data.Status.Data = make(map[string]nnfv1alpha1.NnfNodeECPrivateData)
+		data.Status.Data = make(map[string]nnfv1alpha2.NnfNodeECPrivateData)
 	}
 
 	if _, found := data.Status.Data[psi.name]; !found {
-		data.Status.Data[psi.name] = make(nnfv1alpha1.NnfNodeECPrivateData)
+		data.Status.Data[psi.name] = make(nnfv1alpha2.NnfNodeECPrivateData)
 	}
 
 	if err := fn(persistent.NewBase64PersistentStorageTransaction(data.Status.Data[psi.name])); err != nil {
@@ -219,7 +219,7 @@ func (psi *crdPersistentStorageInterface) Delete(key string) error {
 
 Retry:
 
-	data := nnfv1alpha1.NnfNodeECData{}
+	data := nnfv1alpha2.NnfNodeECData{}
 	if err := psi.reconciler.Get(context.TODO(), psi.reconciler.NamespacedName, &data); err != nil {
 		return err
 	}
@@ -244,6 +244,6 @@ func (r *NnfNodeECDataReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	}
 
 	return ctrl.NewControllerManagedBy(mgr).
-		For(&nnfv1alpha1.NnfNodeECData{}).
+		For(&nnfv1alpha2.NnfNodeECData{}).
 		Complete(r)
 }
diff --git a/internal/controller/nnf_node_storage_controller.go b/internal/controller/nnf_node_storage_controller.go
index c238fb03d..1079ba178 100644
--- a/internal/controller/nnf_node_storage_controller.go
+++ b/internal/controller/nnf_node_storage_controller.go
@@ -39,7 +39,7 @@ import (
 	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
 	"github.com/DataWorkflowServices/dws/utils/updater"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 	"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics"
 )
 
@@ -109,7 +109,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	metrics.NnfNodeStorageReconcilesTotal.Inc()
 
-	nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{}
+	nnfNodeStorage := &nnfv1alpha2.NnfNodeStorage{}
 	if err := r.Get(ctx, req.NamespacedName, nnfNodeStorage); err != nil {
 		// ignore not-found errors, since they can't be fixed by an immediate
 		// requeue (we'll need to wait for a new notification), and we can get them
@@ -125,7 +125,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	// so when we would normally call "return ctrl.Result{}, nil", at that time
 	// "err" is nil - and if permitted we will update err with the result of
 	// the r.Update()
-	statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfNodeStorageStatus](nnfNodeStorage)
+	statusUpdater := updater.NewStatusUpdater[*nnfv1alpha2.NnfNodeStorageStatus](nnfNodeStorage)
 	defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }()
 	defer func() { nnfNodeStorage.Status.SetResourceErrorAndLog(err, log) }()
@@ -184,7 +184,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	// Initialize the status section with empty allocation statuses.
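+	// First pass: create Spec.Count allocation statuses, each starting with Ready false.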
 	if len(nnfNodeStorage.Status.Allocations) == 0 {
-		nnfNodeStorage.Status.Allocations = make([]nnfv1alpha1.NnfNodeStorageAllocationStatus, nnfNodeStorage.Spec.Count)
+		nnfNodeStorage.Status.Allocations = make([]nnfv1alpha2.NnfNodeStorageAllocationStatus, nnfNodeStorage.Spec.Count)
 		for i := range nnfNodeStorage.Status.Allocations {
 			nnfNodeStorage.Status.Allocations[i].Ready = false
 		}
@@ -228,7 +228,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	return ctrl.Result{}, nil
 }
 
-func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (*ctrl.Result, error) {
+func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, index int) (*ctrl.Result, error) {
 	log := r.Log.WithValues("NnfNodeStorage", client.ObjectKeyFromObject(nnfNodeStorage), "index", index)
 
 	blockDevice, fileSystem, err := getBlockDeviceAndFileSystem(ctx, r.Client, nnfNodeStorage, index, log)
@@ -271,7 +271,7 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode
 	return nil, nil
 }
 
-func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, blockDevices []blockdevice.BlockDevice, fileSystems []filesystem.FileSystem) (*ctrl.Result, error) {
+func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNodeStorage *nnfv1alpha2.NnfNodeStorage, blockDevices []blockdevice.BlockDevice, fileSystems []filesystem.FileSystem) (*ctrl.Result, error) {
 	log := r.Log.WithValues("NnfNodeStorage", client.ObjectKeyFromObject(nnfNodeStorage))
 
 	for index, blockDevice := range blockDevices {
@@ -345,6 +345,6 @@ func (r *NnfNodeStorageReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	maxReconciles := runtime.GOMAXPROCS(0)
 	return ctrl.NewControllerManagedBy(mgr).
 		WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}).
-		For(&nnfv1alpha1.NnfNodeStorage{}).
+		For(&nnfv1alpha2.NnfNodeStorage{}).
 		Complete(r)
 }
diff --git a/internal/controller/nnf_node_storage_controller_test.go b/internal/controller/nnf_node_storage_controller_test.go
index e6796d71c..96f00993a 100644
--- a/internal/controller/nnf_node_storage_controller_test.go
+++ b/internal/controller/nnf_node_storage_controller_test.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022-2023 Hewlett Packard Enterprise Development LP
+ * Copyright 2022-2024 Hewlett Packard Enterprise Development LP
  * Other additional copyright holders may be indicated within.
  *
  * The entirety of this work is licensed under the Apache License,
@@ -31,13 +31,13 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 
 	nnf "github.com/NearNodeFlash/nnf-ec/pkg"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 )
 
 var _ = PDescribe("NNF Node Storage Controller Test", func() {
 	var (
 		key     types.NamespacedName
-		storage *nnfv1alpha1.NnfNodeStorage
+		storage *nnfv1alpha2.NnfNodeStorage
 	)
 
 	BeforeEach(func() {
@@ -55,12 +55,12 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() {
 			Namespace: corev1.NamespaceDefault,
 		}
 
-		storage = &nnfv1alpha1.NnfNodeStorage{
+		storage = &nnfv1alpha2.NnfNodeStorage{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      key.Name,
 				Namespace: key.Namespace,
 			},
-			Spec: nnfv1alpha1.NnfNodeStorageSpec{
+			Spec: nnfv1alpha2.NnfNodeStorageSpec{
 				Count: 1,
 			},
 		}
@@ -70,13 +70,13 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() {
 		Expect(k8sClient.Create(context.TODO(), storage)).To(Succeed())
 
 		Eventually(func() error {
-			expected := &nnfv1alpha1.NnfNodeStorage{}
+			expected := &nnfv1alpha2.NnfNodeStorage{}
 			return k8sClient.Get(context.TODO(), key, expected)
 		}, "3s", "1s").Should(Succeed(), "expected return after create. key: "+key.String())
 	})
 
 	AfterEach(func() {
-		expected := &nnfv1alpha1.NnfNodeStorage{}
+		expected := &nnfv1alpha2.NnfNodeStorage{}
 		Expect(k8sClient.Get(context.TODO(), key, expected)).To(Succeed())
 		Expect(k8sClient.Delete(context.TODO(), expected)).To(Succeed())
 	})
@@ -87,7 +87,7 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() {
 		})
 
 		It("is successful", func() {
-			expected := &nnfv1alpha1.NnfNodeStorage{}
+			expected := &nnfv1alpha2.NnfNodeStorage{}
 			Expect(k8sClient.Get(context.TODO(), key, expected)).To(Succeed())
 		})
 	})
@@ -96,7 +96,7 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() {
 		BeforeEach(func() {
 			storage.Spec.FileSystemType = "lustre"
-			storage.Spec.LustreStorage = nnfv1alpha1.LustreStorageSpec{
+			storage.Spec.LustreStorage = nnfv1alpha2.LustreStorageSpec{
 				FileSystemName: "test",
 				StartIndex:     0,
 				MgsAddress:     "test",
@@ -106,7 +106,7 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() {
 		})
 
 		It("is successful", func() {
-			expected := &nnfv1alpha1.NnfNodeStorage{}
+			expected := &nnfv1alpha2.NnfNodeStorage{}
 			Expect(k8sClient.Get(context.TODO(), key, expected)).To(Succeed())
 		})
 	})
diff --git a/internal/controller/nnf_persistentstorageinstance_controller.go b/internal/controller/nnf_persistentstorageinstance_controller.go
index be2c38087..1207ebc0a 100644
--- a/internal/controller/nnf_persistentstorageinstance_controller.go
+++ b/internal/controller/nnf_persistentstorageinstance_controller.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022, 2023 Hewlett Packard Enterprise Development LP
+ * Copyright 2022-2024 Hewlett Packard Enterprise Development LP
  * Other additional copyright holders may be indicated within.
  *
  * The entirety of this work is licensed under the Apache License,
@@ -38,7 +38,7 @@ import (
 	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
 	"github.com/DataWorkflowServices/dws/utils/dwdparse"
 	"github.com/DataWorkflowServices/dws/utils/updater"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 	"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics"
 )
 
@@ -153,8 +153,8 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re
 			return ctrl.Result{}, dwsv1alpha2.NewResourceError("").WithUserMessage("creating persistent MGT does not accept 'capacity' argument").WithFatal().WithUser()
 		}
 		labels := persistentStorage.GetLabels()
-		if _, ok := labels[nnfv1alpha1.StandaloneMGTLabel]; !ok {
-			labels[nnfv1alpha1.StandaloneMGTLabel] = pinnedProfile.Data.LustreStorage.StandaloneMGTPoolName
+		if _, ok := labels[nnfv1alpha2.StandaloneMGTLabel]; !ok {
+			labels[nnfv1alpha2.StandaloneMGTLabel] = pinnedProfile.Data.LustreStorage.StandaloneMGTPoolName
 			persistentStorage.SetLabels(labels)
 			if err := r.Update(ctx, persistentStorage); err != nil {
 				if !apierrors.IsConflict(err) {
@@ -193,7 +193,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re
 	} else if persistentStorage.Spec.State == dwsv1alpha2.PSIStateActive {
 		// Wait for the NnfStorage to be ready before marking the persistent storage
 		// state as "active"
-		nnfStorage := &nnfv1alpha1.NnfStorage{}
+		nnfStorage := &nnfv1alpha2.NnfStorage{}
 		if err := r.Get(ctx, req.NamespacedName, nnfStorage); err != nil {
 			return ctrl.Result{}, client.IgnoreNotFound(err)
 		}
@@ -261,9 +261,9 @@ func (r *PersistentStorageReconciler) createServers(ctx context.Context, persist
 // SetupWithManager sets up the controller with the Manager.
 func (r *PersistentStorageReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	r.ChildObjects = []dwsv1alpha2.ObjectList{
-		&nnfv1alpha1.NnfStorageList{},
+		&nnfv1alpha2.NnfStorageList{},
 		&dwsv1alpha2.ServersList{},
-		&nnfv1alpha1.NnfStorageProfileList{},
+		&nnfv1alpha2.NnfStorageProfileList{},
 	}
 
 	maxReconciles := runtime.GOMAXPROCS(0)
@@ -271,7 +271,7 @@ func (r *PersistentStorageReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}).
 		For(&dwsv1alpha2.PersistentStorageInstance{}).
 		Owns(&dwsv1alpha2.Servers{}).
-		Owns(&nnfv1alpha1.NnfStorage{}).
-		Owns(&nnfv1alpha1.NnfStorageProfile{}).
+		Owns(&nnfv1alpha2.NnfStorage{}).
+		Owns(&nnfv1alpha2.NnfStorageProfile{}).
 		Complete(r)
 }
diff --git a/internal/controller/nnf_persistentstorageinstance_controller_test.go b/internal/controller/nnf_persistentstorageinstance_controller_test.go
index 4d3d2b7aa..1bb10efd4 100644
--- a/internal/controller/nnf_persistentstorageinstance_controller_test.go
+++ b/internal/controller/nnf_persistentstorageinstance_controller_test.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022-2023 Hewlett Packard Enterprise Development LP
+ * Copyright 2022-2024 Hewlett Packard Enterprise Development LP
  * Other additional copyright holders may be indicated within.
  *
  * The entirety of this work is licensed under the Apache License,
@@ -30,12 +30,12 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 )
 
 var _ = Describe("PersistentStorage test", func() {
 	var (
-		storageProfile *nnfv1alpha1.NnfStorageProfile
+		storageProfile *nnfv1alpha2.NnfStorageProfile
 	)
 
 	BeforeEach(func() {
@@ -45,7 +45,7 @@ var _ = Describe("PersistentStorage test", func() {
 	AfterEach(func() {
 		Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed())
-		profExpected := &nnfv1alpha1.NnfStorageProfile{}
+		profExpected := &nnfv1alpha2.NnfStorageProfile{}
 		Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present
 			return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected)
 		}).ShouldNot(Succeed())
@@ -83,7 +83,7 @@ var _ = Describe("PersistentStorage test", func() {
 			return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), servers)
 		}).Should(Succeed(), "Create the DWS Servers Resource")
 
-		pinnedStorageProfile := &nnfv1alpha1.NnfStorageProfile{
+		pinnedStorageProfile := &nnfv1alpha2.NnfStorageProfile{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      persistentStorage.GetName(),
 				Namespace: persistentStorage.GetNamespace(),
diff --git a/internal/controller/nnf_port_manager_controller.go b/internal/controller/nnf_port_manager_controller.go
index 579d41123..6f3b48ebe 100644
--- a/internal/controller/nnf_port_manager_controller.go
+++ b/internal/controller/nnf_port_manager_controller.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 Hewlett Packard Enterprise Development LP
+ * Copyright 2023-2024 Hewlett Packard Enterprise Development LP
  * Other additional copyright holders may be indicated within.
  *
  * The entirety of this work is licensed under the Apache License,
@@ -36,7 +36,7 @@ import (
 	"github.com/go-logr/logr"
 
 	dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2"
-	nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1"
+	nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2"
 )
 
 // NnfPortManagerReconciler reconciles a NnfPortManager object
@@ -46,8 +46,8 @@ type NnfPortManagerReconciler struct {
 }
 
 // type aliases for name shortening
-type AllocationSpec = nnfv1alpha1.NnfPortManagerAllocationSpec
-type AllocationStatus = nnfv1alpha1.NnfPortManagerAllocationStatus
+type AllocationSpec = nnfv1alpha2.NnfPortManagerAllocationSpec
+type AllocationStatus = nnfv1alpha2.NnfPortManagerAllocationStatus
 
 //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfportmanagers,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfportmanagers/status,verbs=get;update;patch
@@ -65,13 +65,13 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	log := log.FromContext(ctx)
 
 	unsatisfiedRequests := 0
 
-	mgr := &nnfv1alpha1.NnfPortManager{}
+	mgr := &nnfv1alpha2.NnfPortManager{}
 	if err := r.Get(ctx, req.NamespacedName, mgr); err != nil {
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
 	// Create a resource status updater to ensure the status subresource is updated.
-	statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfPortManagerStatus](mgr)
+	statusUpdater := updater.NewStatusUpdater[*nnfv1alpha2.NnfPortManagerStatus](mgr)
 	defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }()
 
 	// Read in the system configuration which contains the available ports.
@@ -82,14 +82,14 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		},
 	}
 
-	mgr.Status.Status = nnfv1alpha1.NnfPortManagerStatusReady
+	mgr.Status.Status = nnfv1alpha2.NnfPortManagerStatusReady
 	if err := r.Get(ctx, client.ObjectKeyFromObject(config), config); err != nil {
 		if !errors.IsNotFound(err) {
 			return ctrl.Result{}, err
 		}
 
 		log.Info("System Configuration not found", "config", client.ObjectKeyFromObject(config).String())
-		mgr.Status.Status = nnfv1alpha1.NnfPortManagerStatusSystemConfigurationNotFound
+		mgr.Status.Status = nnfv1alpha2.NnfPortManagerStatusSystemConfigurationNotFound
 		res = ctrl.Result{Requeue: true} // Force a requeue - we want the manager to go ready even if there are zero allocations
 	}
 
@@ -100,18 +100,18 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	// allocating the desired ports.
 	for _, spec := range mgr.Spec.Allocations {
 		var ports []uint16
-		var status nnfv1alpha1.NnfPortManagerAllocationStatusStatus
-		var allocationStatus *nnfv1alpha1.NnfPortManagerAllocationStatus
+		var status nnfv1alpha2.NnfPortManagerAllocationStatusStatus
+		var allocationStatus *nnfv1alpha2.NnfPortManagerAllocationStatus
 
 		// If the specification is already included in the allocations and InUse, continue
 		allocationStatus = r.findAllocationStatus(mgr, spec)
-		if allocationStatus != nil && allocationStatus.Status == nnfv1alpha1.NnfPortManagerAllocationStatusInUse {
+		if allocationStatus != nil && allocationStatus.Status == nnfv1alpha2.NnfPortManagerAllocationStatusInUse {
 			continue
 		}
 
 		// Determine if the port manager is ready and find a free port
-		if mgr.Status.Status != nnfv1alpha1.NnfPortManagerStatusReady {
-			ports, status = nil, nnfv1alpha1.NnfPortManagerAllocationStatusInvalidConfiguration
+		if mgr.Status.Status != nnfv1alpha2.NnfPortManagerStatusReady {
+			ports, status = nil, nnfv1alpha2.NnfPortManagerAllocationStatusInvalidConfiguration
 		} else {
 			ports, status = r.findFreePorts(log, mgr, config, spec)
 		}
@@ -119,7 +119,7 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		log.Info("Allocation", "requester", spec.Requester, "count", spec.Count, "ports", ports, "status", status)
 
 		// Port could not be allocated - try again next time
-		if status != nnfv1alpha1.NnfPortManagerAllocationStatusInUse {
+		if status != nnfv1alpha2.NnfPortManagerAllocationStatusInUse {
 			unsatisfiedRequests++
 			log.Info("Allocation unsatisfied", "requester", spec.Requester, "count", spec.Count, "ports", ports, "status", status)
 		}
@@ -135,7 +135,7 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		spec.Requester.DeepCopyInto(allocationStatus.Requester)
 
 		if mgr.Status.Allocations == nil {
-			mgr.Status.Allocations = make([]nnfv1alpha1.NnfPortManagerAllocationStatus, 0)
+			mgr.Status.Allocations = make([]nnfv1alpha2.NnfPortManagerAllocationStatus, 0)
 		}
 
 		mgr.Status.Allocations = append(mgr.Status.Allocations, allocationStatus)
@@ -158,8 +158,8 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 
 // isAllocationNeeded returns true if the provided Port Allocation Status has a matching
 // requester in the specification, and false otherwise.
-func (r *NnfPortManagerReconciler) isAllocationNeeded(mgr *nnfv1alpha1.NnfPortManager, status *AllocationStatus) bool { - if status.Status != nnfv1alpha1.NnfPortManagerAllocationStatusInUse && status.Status != nnfv1alpha1.NnfPortManagerAllocationStatusInsufficientResources { +func (r *NnfPortManagerReconciler) isAllocationNeeded(mgr *nnfv1alpha2.NnfPortManager, status *AllocationStatus) bool { + if status.Status != nnfv1alpha2.NnfPortManagerAllocationStatusInUse && status.Status != nnfv1alpha2.NnfPortManagerAllocationStatusInsufficientResources { return false } @@ -176,7 +176,7 @@ func (r *NnfPortManagerReconciler) isAllocationNeeded(mgr *nnfv1alpha1.NnfPortMa return false } -func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr *nnfv1alpha1.NnfPortManager, cooldown int) { +func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr *nnfv1alpha2.NnfPortManager, cooldown int) { // Free unused allocations. This will check if the Status.Allocations exist in // the list of desired allocations in the Spec field and mark any unused allocations @@ -193,7 +193,7 @@ func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr if cooldown == 0 { allocsToRemove = append(allocsToRemove, idx) log.Info("Allocation unused - removing", "requester", status.Requester, "status", status.Status) - } else if status.Status == nnfv1alpha1.NnfPortManagerAllocationStatusCooldown { + } else if status.Status == nnfv1alpha2.NnfPortManagerAllocationStatusCooldown { period := now.Sub(status.TimeUnallocated.Time) log.Info("Allocation unused - checking cooldown", "requester", status.Requester, "status", status.Status, "period", period, "time", status.TimeUnallocated.String()) if period >= time.Duration(cooldown)*time.Second { @@ -202,7 +202,7 @@ func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr } } else if status.TimeUnallocated == nil { status.TimeUnallocated = &now - status.Status = nnfv1alpha1.NnfPortManagerAllocationStatusCooldown + status.Status = nnfv1alpha2.NnfPortManagerAllocationStatusCooldown log.Info("Allocation unused -- cooldown set", "requester", status.Requester, "status", status.Status) } } @@ -214,7 +214,7 @@ func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr } } -func (r *NnfPortManagerReconciler) findAllocationStatus(mgr *nnfv1alpha1.NnfPortManager, spec AllocationSpec) *AllocationStatus { +func (r *NnfPortManagerReconciler) findAllocationStatus(mgr *nnfv1alpha2.NnfPortManager, spec AllocationSpec) *AllocationStatus { for idx := range mgr.Status.Allocations { status := &mgr.Status.Allocations[idx] if status.Requester == nil { @@ -231,17 +231,17 @@ func (r *NnfPortManagerReconciler) findAllocationStatus(mgr *nnfv1alpha1.NnfPort // isAllocated returns true if the provided specification is in the Port Manager's allocation // statuses, and false otherwise. -func (r *NnfPortManagerReconciler) isAllocated(mgr *nnfv1alpha1.NnfPortManager, spec AllocationSpec) bool { +func (r *NnfPortManagerReconciler) isAllocated(mgr *nnfv1alpha2.NnfPortManager, spec AllocationSpec) bool { return r.findAllocationStatus(mgr, spec) != nil } // Find free ports to satisfy the provided specification.
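// The cooldown handling above amounts to a small state machine: an allocation
// that leaves the spec is first stamped with TimeUnallocated and moved to the
// Cooldown status, and is only removed once the cooldown period elapses (or
// immediately when cooldown is 0). A hypothetical helper expressing just the
// expiry check, using the same fields as the code above:
//
//	func isCooldownExpired(status *AllocationStatus, cooldown int, now metav1.Time) bool {
//		if status.TimeUnallocated == nil {
//			return false // still in use, or not yet stamped
//		}
//		return now.Sub(status.TimeUnallocated.Time) >= time.Duration(cooldown)*time.Second
//	}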
-func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alpha1.NnfPortManager, config *dwsv1alpha2.SystemConfiguration, spec AllocationSpec) ([]uint16, nnfv1alpha1.NnfPortManagerAllocationStatusStatus) { +func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alpha2.NnfPortManager, config *dwsv1alpha2.SystemConfiguration, spec AllocationSpec) ([]uint16, nnfv1alpha2.NnfPortManagerAllocationStatusStatus) { portsInUse := make([]uint16, 0) for _, status := range mgr.Status.Allocations { - if status.Status == nnfv1alpha1.NnfPortManagerAllocationStatusInUse || - status.Status == nnfv1alpha1.NnfPortManagerAllocationStatusCooldown { + if status.Status == nnfv1alpha2.NnfPortManagerAllocationStatusInUse || + status.Status == nnfv1alpha2.NnfPortManagerAllocationStatusCooldown { portsInUse = append(portsInUse, status.Ports...) } } @@ -276,7 +276,7 @@ func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alph if len(ports) >= count { log.Info("Ports claimed from system configuration", "ports", ports) - return ports[:count], nnfv1alpha1.NnfPortManagerAllocationStatusInUse + return ports[:count], nnfv1alpha2.NnfPortManagerAllocationStatusInUse } // If we still haven't found a sufficient number of free ports, free up unused allocations @@ -293,7 +293,7 @@ func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alph for idx := range mgr.Status.Allocations { status := &mgr.Status.Allocations[idx] - if status.Status == nnfv1alpha1.NnfPortManagerAllocationStatusFree { + if status.Status == nnfv1alpha2.NnfPortManagerAllocationStatusFree { log.Info("Ports claimed from free list", "ports", status.Ports) // Append this value's ports to the returned ports. We could over-allocate here, but @@ -314,18 +314,18 @@ func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alph for len(ports) < count { switch claimPortsFromFreeAllocation() { case exhausted: - return []uint16{}, nnfv1alpha1.NnfPortManagerAllocationStatusInsufficientResources + return []uint16{}, nnfv1alpha2.NnfPortManagerAllocationStatusInsufficientResources case more: // loop again if needed } } - return ports[:count], nnfv1alpha1.NnfPortManagerAllocationStatusInUse + return ports[:count], nnfv1alpha2.NnfPortManagerAllocationStatusInUse } // SetupWithManager sets up the controller with the Manager. func (r *NnfPortManagerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&nnfv1alpha1.NnfPortManager{}). + For(&nnfv1alpha2.NnfPortManager{}). Complete(r) } diff --git a/internal/controller/nnf_port_manager_controller_test.go b/internal/controller/nnf_port_manager_controller_test.go index 6528e62b6..a031a34bd 100644 --- a/internal/controller/nnf_port_manager_controller_test.go +++ b/internal/controller/nnf_port_manager_controller_test.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within.
* * The entirety of this work is licensed under the Apache License, @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { @@ -47,7 +47,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { Describe("NNF Port Manager Controller Test", func() { var cfg *dwsv1alpha2.SystemConfiguration - var mgr *nnfv1alpha1.NnfPortManager + var mgr *nnfv1alpha2.NnfPortManager portCooldown := 1 JustBeforeEach(func() { @@ -73,18 +73,18 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { } }) - mgr = &nnfv1alpha1.NnfPortManager{ + mgr = &nnfv1alpha2.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-port-manager", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha1.NnfPortManagerSpec{ + Spec: nnfv1alpha2.NnfPortManagerSpec{ SystemConfiguration: corev1.ObjectReference{ Name: cfg.Name, Namespace: cfg.Namespace, Kind: reflect.TypeOf(*cfg).Name(), }, - Allocations: make([]nnfv1alpha1.NnfPortManagerAllocationSpec, 0), + Allocations: make([]nnfv1alpha2.NnfPortManagerAllocationSpec, 0), }, } Expect(k8sClient.Create(ctx, mgr)).To(Succeed()) @@ -103,10 +103,10 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { // Submit an allocation and verify it has been accounted for - this doesn't mean the ports // were successfully allocated, however. - allocatePorts := func(mgr *nnfv1alpha1.NnfPortManager, name string, count int) []uint16 { + allocatePorts := func(mgr *nnfv1alpha2.NnfPortManager, name string, count int) []uint16 { By(fmt.Sprintf("Reserving %d ports for '%s'", count, name)) - allocation := nnfv1alpha1.NnfPortManagerAllocationSpec{ + allocation := nnfv1alpha2.NnfPortManagerAllocationSpec{ Requester: corev1.ObjectReference{Name: name}, Count: count, } @@ -129,10 +129,10 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { } // Submit an allocation and expect it to be successfully allocated (i.e. 
ports InUse) - reservePorts := func(mgr *nnfv1alpha1.NnfPortManager, name string, count int) []uint16 { + reservePorts := func(mgr *nnfv1alpha2.NnfPortManager, name string, count int) []uint16 { ports := allocatePorts(mgr, name, count) - allocation := nnfv1alpha1.NnfPortManagerAllocationSpec{ + allocation := nnfv1alpha2.NnfPortManagerAllocationSpec{ Requester: corev1.ObjectReference{Name: name}, Count: count, } @@ -140,16 +140,16 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { status := r.findAllocationStatus(mgr, allocation) Expect(status).ToNot(BeNil()) Expect(status.Ports).To(HaveLen(allocation.Count)) - Expect(status.Status).To(Equal(nnfv1alpha1.NnfPortManagerAllocationStatusInUse)) + Expect(status.Status).To(Equal(nnfv1alpha2.NnfPortManagerAllocationStatusInUse)) return ports } - reservePortsAllowFail := func(mgr *nnfv1alpha1.NnfPortManager, name string, count int) []uint16 { + reservePortsAllowFail := func(mgr *nnfv1alpha2.NnfPortManager, name string, count int) []uint16 { return allocatePorts(mgr, name, count) } - releasePorts := func(mgr *nnfv1alpha1.NnfPortManager, name string) { + releasePorts := func(mgr *nnfv1alpha2.NnfPortManager, name string) { By(fmt.Sprintf("Releasing ports for '%s'", name)) requester := corev1.ObjectReference{Name: name} @@ -170,7 +170,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { // Simple way to fire the reconciler to test the cooldown handling // without having to reserve new ports. This is just to limit the scope // of the test. - kickPortManager := func(mgr *nnfv1alpha1.NnfPortManager) { + kickPortManager := func(mgr *nnfv1alpha2.NnfPortManager) { By("Kicking port manager to force reconcile") Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(mgr), mgr)).To(Succeed()) @@ -183,7 +183,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { } // Verify the number of allocations in the status allocation list that are InUse - verifyNumAllocations := func(mgr *nnfv1alpha1.NnfPortManager, status nnfv1alpha1.NnfPortManagerAllocationStatusStatus, count int) { + verifyNumAllocations := func(mgr *nnfv1alpha2.NnfPortManager, status nnfv1alpha2.NnfPortManagerAllocationStatusStatus, count int) { By(fmt.Sprintf("Verifying there are %d allocations with Status %s in the status allocation list", count, status)) Eventually(func() int { @@ -198,16 +198,16 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { }).Should(Equal(count)) } - verifyNumAllocationsInUse := func(mgr *nnfv1alpha1.NnfPortManager, count int) { - verifyNumAllocations(mgr, nnfv1alpha1.NnfPortManagerAllocationStatusInUse, count) + verifyNumAllocationsInUse := func(mgr *nnfv1alpha2.NnfPortManager, count int) { + verifyNumAllocations(mgr, nnfv1alpha2.NnfPortManagerAllocationStatusInUse, count) } - verifyNumAllocationsCooldown := func(mgr *nnfv1alpha1.NnfPortManager, count int) { - verifyNumAllocations(mgr, nnfv1alpha1.NnfPortManagerAllocationStatusCooldown, count) + verifyNumAllocationsCooldown := func(mgr *nnfv1alpha2.NnfPortManager, count int) { + verifyNumAllocations(mgr, nnfv1alpha2.NnfPortManagerAllocationStatusCooldown, count) } - verifyNumAllocationsInsuffientResources := func(mgr *nnfv1alpha1.NnfPortManager, count int) { - verifyNumAllocations(mgr, nnfv1alpha1.NnfPortManagerAllocationStatusInsufficientResources, count) + verifyNumAllocationsInsuffientResources := func(mgr *nnfv1alpha2.NnfPortManager, count int) { + verifyNumAllocations(mgr, 
nnfv1alpha2.NnfPortManagerAllocationStatusInsufficientResources, count) } waitForCooldown := func(extra int) { @@ -226,10 +226,10 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { kickPortManager(mgr) - Eventually(func() nnfv1alpha1.NnfPortManagerStatusStatus { + Eventually(func() nnfv1alpha2.NnfPortManagerStatusStatus { k8sClient.Get(ctx, client.ObjectKeyFromObject(mgr), mgr) return mgr.Status.Status - }).Should(Equal(nnfv1alpha1.NnfPortManagerStatusSystemConfigurationNotFound)) + }).Should(Equal(nnfv1alpha2.NnfPortManagerStatusSystemConfigurationNotFound)) }) }) @@ -334,7 +334,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { const name = "all" reservePorts(mgr, name, portEnd-portStart+1) - allocation := nnfv1alpha1.NnfPortManagerAllocationSpec{ + allocation := nnfv1alpha2.NnfPortManagerAllocationSpec{ Requester: corev1.ObjectReference{Name: "insufficient-resources"}, Count: 1, } @@ -353,7 +353,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { status := r.findAllocationStatus(mgr, allocation) Expect(status).ToNot(BeNil()) Expect(status.Ports).To(BeEmpty()) - Expect(status.Status).To(Equal(nnfv1alpha1.NnfPortManagerAllocationStatusInsufficientResources)) + Expect(status.Status).To(Equal(nnfv1alpha2.NnfPortManagerAllocationStatusInsufficientResources)) }) }) @@ -388,12 +388,12 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { By("Attempting to reserve an additional port and failing") ports := reservePortsAllowFail(mgr, "waiting", 1) - allocation := nnfv1alpha1.NnfPortManagerAllocationSpec{Requester: corev1.ObjectReference{Name: "waiting"}, Count: 1} + allocation := nnfv1alpha2.NnfPortManagerAllocationSpec{Requester: corev1.ObjectReference{Name: "waiting"}, Count: 1} status := r.findAllocationStatus(mgr, allocation) Expect(ports).To(HaveLen(0)) Expect(status).ToNot(BeNil()) - Expect(status.Status).To(Equal(nnfv1alpha1.NnfPortManagerAllocationStatusInsufficientResources)) + Expect(status.Status).To(Equal(nnfv1alpha2.NnfPortManagerAllocationStatusInsufficientResources)) verifyNumAllocationsInUse(mgr, portTotal) verifyNumAllocationsInsuffientResources(mgr, 1) diff --git a/internal/controller/nnf_storage_controller.go b/internal/controller/nnf_storage_controller.go index a71990aa0..7105dacba 100644 --- a/internal/controller/nnf_storage_controller.go +++ b/internal/controller/nnf_storage_controller.go @@ -44,7 +44,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -99,7 +99,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) log := r.Log.WithValues("NnfStorage", req.NamespacedName) metrics.NnfStorageReconcilesTotal.Inc() - storage := &nnfv1alpha1.NnfStorage{} + storage := &nnfv1alpha2.NnfStorage{} if err := r.Get(ctx, req.NamespacedName, storage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -110,7 +110,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Create an updater for the entire node. 
This will handle calls to r.Status().Update() such // that we can repeatedly make calls to the internal update method, with the final update // occurring on function exit. - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfStorageStatus](storage) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha2.NnfStorageStatus](storage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { storage.Status.SetResourceErrorAndLog(err, log) }() @@ -160,7 +160,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Initialize the status section of the NnfStorage if it hasn't been done already. if len(storage.Status.AllocationSets) != len(storage.Spec.AllocationSets) { - storage.Status.AllocationSets = make([]nnfv1alpha1.NnfStorageAllocationSetStatus, len(storage.Spec.AllocationSets)) + storage.Status.AllocationSets = make([]nnfv1alpha2.NnfStorageAllocationSetStatus, len(storage.Spec.AllocationSets)) for i := range storage.Status.AllocationSets { storage.Status.AllocationSets[i].Ready = false } @@ -256,7 +256,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } -func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage, persistentMgsReference corev1.ObjectReference) error { +func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha2.NnfStorage, persistentMgsReference corev1.ObjectReference) error { persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: persistentMgsReference.Name, @@ -276,7 +276,7 @@ func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context reference := corev1.ObjectReference{ Name: nnfStorage.Name, Namespace: nnfStorage.Namespace, - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name(), } for _, existingReference := range persistentStorage.Spec.ConsumerReferences { @@ -290,7 +290,7 @@ func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context return r.Update(ctx, persistentStorage) } -func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage, persistentMgsReference corev1.ObjectReference) error { +func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha2.NnfStorage, persistentMgsReference corev1.ObjectReference) error { persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: persistentMgsReference.Name, @@ -306,7 +306,7 @@ func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Cont reference := corev1.ObjectReference{ Name: nnfStorage.Name, Namespace: nnfStorage.Namespace, - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name(), } for i, existingReference := range persistentStorage.Spec.ConsumerReferences { @@ -319,7 +319,7 @@ func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Cont return nil } -func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfStorage *nnfv1alpha2.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { log :=
r.Log.WithValues("NnfStorage", client.ObjectKeyFromObject(nnfStorage)) allocationSet := nnfStorage.Spec.AllocationSets[allocationSetIndex] @@ -327,7 +327,7 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt for i, node := range allocationSet.Nodes { // Per Rabbit namespace. - nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{ + nnfNodeBlockStorage := &nnfv1alpha2.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorageName(nnfStorage, allocationSetIndex, i), Namespace: node.Name, @@ -340,7 +340,7 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt dwsv1alpha2.AddOwnerLabels(nnfNodeBlockStorage, nnfStorage) labels := nnfNodeBlockStorage.GetLabels() - labels[nnfv1alpha1.AllocationSetLabel] = allocationSet.Name + labels[nnfv1alpha2.AllocationSetLabel] = allocationSet.Name nnfNodeBlockStorage.SetLabels(labels) expectedAllocations := node.Count @@ -350,7 +350,7 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt nnfNodeBlockStorage.Spec.SharedAllocation = allocationSet.SharedAllocation if len(nnfNodeBlockStorage.Spec.Allocations) == 0 { - nnfNodeBlockStorage.Spec.Allocations = make([]nnfv1alpha1.NnfNodeBlockStorageAllocationSpec, expectedAllocations) + nnfNodeBlockStorage.Spec.Allocations = make([]nnfv1alpha2.NnfNodeBlockStorageAllocationSpec, expectedAllocations) } if len(nnfNodeBlockStorage.Spec.Allocations) != expectedAllocations { @@ -400,15 +400,15 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt // Get the status from all the child NnfNodeBlockStorage resources and use them to build the status // for the NnfStorage. -func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Context, nnfStorage *nnfv1alpha2.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: nnfStorage.Name, Namespace: nnfStorage.Namespace}) allocationSet := &nnfStorage.Status.AllocationSets[allocationSetIndex] allocationSet.AllocationCount = 0 - nnfNodeBlockStorageList := &nnfv1alpha1.NnfNodeBlockStorageList{} + nnfNodeBlockStorageList := &nnfv1alpha2.NnfNodeBlockStorageList{} matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) - matchLabels[nnfv1alpha1.AllocationSetLabel] = nnfStorage.Spec.AllocationSets[allocationSetIndex].Name + matchLabels[nnfv1alpha2.AllocationSetLabel] = nnfStorage.Spec.AllocationSets[allocationSetIndex].Name listOptions := []client.ListOption{ matchLabels, @@ -454,7 +454,7 @@ func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Conte // Create an NnfNodeStorage if it doesn't exist, or update it if it requires updating. Each // Rabbit node gets an NnfNodeStorage, and there may be multiple allocations requested in it. // This limits the number of resources that have to be broadcast to the Rabbits. 
-func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *nnfv1alpha1.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *nnfv1alpha2.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: storage.Name, Namespace: storage.Namespace}) if storage.Spec.FileSystemType == "lustre" { @@ -478,7 +478,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n } if mgsNode != "" { - nnfNode := &nnfv1alpha1.NnfNode{ + nnfNode := &nnfv1alpha2.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: mgsNode, @@ -499,12 +499,12 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n // Create the NnfLustreMGT resource if this allocation set is for an MGT allocationSet := storage.Spec.AllocationSets[allocationSetIndex] if allocationSet.TargetType == "mgt" || allocationSet.TargetType == "mgtmdt" { - nnfLustreMgt := &nnfv1alpha1.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha2.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: storage.GetName(), Namespace: mgsNode, }, - Spec: nnfv1alpha1.NnfLustreMGTSpec{ + Spec: nnfv1alpha2.NnfLustreMGTSpec{ Addresses: []string{mgsAddress}, FsNameStart: "aaaaaaaa", }, @@ -540,7 +540,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n startIndex := 0 for i, node := range allocationSet.Nodes { // Per Rabbit namespace. - nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{ + nnfNodeStorage := &nnfv1alpha2.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorageName(storage, allocationSetIndex, i), Namespace: node.Name, @@ -553,13 +553,13 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n dwsv1alpha2.AddOwnerLabels(nnfNodeStorage, storage) labels := nnfNodeStorage.GetLabels() - labels[nnfv1alpha1.AllocationSetLabel] = allocationSet.Name + labels[nnfv1alpha2.AllocationSetLabel] = allocationSet.Name nnfNodeStorage.SetLabels(labels) nnfNodeStorage.Spec.BlockReference = corev1.ObjectReference{ Name: nnfNodeStorageName(storage, allocationSetIndex, i), Namespace: node.Name, - Kind: reflect.TypeOf(nnfv1alpha1.NnfNodeBlockStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfNodeBlockStorage{}).Name(), } nnfNodeStorage.Spec.Capacity = allocationSet.Capacity nnfNodeStorage.Spec.UserID = storage.Spec.UserID @@ -606,12 +606,12 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n // Get the status from all the child NnfNodeStorage resources and use them to build the status // for the NnfStorage. 
-func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, storage *nnfv1alpha1.NnfStorage, allocationSetIndex int, deleting bool) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, storage *nnfv1alpha2.NnfStorage, allocationSetIndex int, deleting bool) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: storage.Name, Namespace: storage.Namespace}) - nnfNodeStorageList := &nnfv1alpha1.NnfNodeStorageList{} + nnfNodeStorageList := &nnfv1alpha2.NnfNodeStorageList{} matchLabels := dwsv1alpha2.MatchingOwner(storage) - matchLabels[nnfv1alpha1.AllocationSetLabel] = storage.Spec.AllocationSets[allocationSetIndex].Name + matchLabels[nnfv1alpha2.AllocationSetLabel] = storage.Spec.AllocationSets[allocationSetIndex].Name listOptions := []client.ListOption{ matchLabels, @@ -653,9 +653,9 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s return nil, nil } -func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage) (*nnfv1alpha1.NnfLustreMGT, error) { +func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnfv1alpha2.NnfStorage) (*nnfv1alpha2.NnfLustreMGT, error) { if nnfStorage.Status.LustreMgtReference != (corev1.ObjectReference{}) { - nnfLustreMgt := &nnfv1alpha1.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha2.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: nnfStorage.Status.LustreMgtReference.Name, Namespace: nnfStorage.Status.LustreMgtReference.Namespace, @@ -669,12 +669,12 @@ func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnf return nnfLustreMgt, nil } - nnfLustreMgtList := &nnfv1alpha1.NnfLustreMGTList{} + nnfLustreMgtList := &nnfv1alpha2.NnfLustreMGTList{} if err := r.List(ctx, nnfLustreMgtList, []client.ListOption{}...); err != nil { return nil, dwsv1alpha2.NewResourceError("could not list NnfLustreMGTs").WithError(err).WithMajor() } - var nnfLustreMgt *nnfv1alpha1.NnfLustreMGT = nil + var nnfLustreMgt *nnfv1alpha2.NnfLustreMGT = nil for i := range nnfLustreMgtList.Items { if func(list []string, search string) bool { for _, element := range list { @@ -701,7 +701,7 @@ func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnf return nnfLustreMgt, nil } -func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage) (string, error) { +func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1alpha2.NnfStorage) (string, error) { nnfLustreMgt, err := r.getLustreMgt(ctx, nnfStorage) if err != nil { return "", dwsv1alpha2.NewResourceError("could not get NnfLustreMGT for address: %s", nnfStorage.Status.MgsAddress).WithError(err) @@ -712,7 +712,7 @@ func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1a nnfStorage.Status.LustreMgtReference = corev1.ObjectReference{ Name: nnfLustreMgt.Name, Namespace: nnfLustreMgt.Namespace, - Kind: reflect.TypeOf(nnfv1alpha1.NnfLustreMGT{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfLustreMGT{}).Name(), } // This will update the status section of the NnfStorage with the reference and requeue @@ -722,7 +722,7 @@ func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1a reference := corev1.ObjectReference{ Name: nnfStorage.Name, Namespace: nnfStorage.Namespace, - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name(), } // Check the status section of the 
NnfLustreMGT to see if an fsname has been assigned yet @@ -753,7 +753,7 @@ func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1a } -func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStorage *nnfv1alpha2.NnfStorage) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", client.ObjectKeyFromObject(nnfStorage)) // Don't create the clientmount in the test environment. Some tests don't fake out the @@ -912,7 +912,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora // Get the status from all the child ClientMount resources and use them to build the status // for the NnfStorage. -func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, storage *nnfv1alpha1.NnfStorage, deleting bool) error { +func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, storage *nnfv1alpha2.NnfStorage, deleting bool) error { clientMountList := &dwsv1alpha2.ClientMountList{} matchLabels := dwsv1alpha2.MatchingOwner(storage) @@ -942,7 +942,7 @@ func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, s // or the object references in the storage resource. We may have created children // that aren't in the cache and we may not have been able to add the object reference // to the NnfStorage. -func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnfv1alpha1.NnfStorage) (nodeStoragesState, error) { +func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnfv1alpha2.NnfStorage) (nodeStoragesState, error) { // Delete any clientmounts that were created by the NnfStorage. deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, []dwsv1alpha2.ObjectList{&dwsv1alpha2.ClientMountList{}}, storage) if err != nil { @@ -961,15 +961,15 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf // Delete the OSTs and MDTs first so we can drop the claim on the NnfLustreMgt resource. This will trigger // an lctl command to run to remove the fsname from the MGT.
childObjects := []dwsv1alpha2.ObjectList{ - &nnfv1alpha1.NnfNodeStorageList{}, + &nnfv1alpha2.NnfNodeStorageList{}, } - ostDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha1.AllocationSetLabel: "ost"}) + ostDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha2.AllocationSetLabel: "ost"}) if err != nil { return nodeStoragesExist, err } - mdtDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha1.AllocationSetLabel: "mdt"}) + mdtDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha2.AllocationSetLabel: "mdt"}) if err != nil { return nodeStoragesExist, err } @@ -1033,7 +1033,7 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf // releaseLustreMGT removes the claim from NnfLustreMGT and returns "true" once the NnfLustreMGT has removed // the entry from the status section, indicating that the fsname has been removed from the MGT -func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nnfv1alpha1.NnfStorage) (bool, error) { +func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nnfv1alpha2.NnfStorage) (bool, error) { if storage.Spec.FileSystemType != "lustre" { return true, nil } @@ -1042,7 +1042,7 @@ func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nn return true, nil } - nnfLustreMgt := &nnfv1alpha1.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha2.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: storage.Status.LustreMgtReference.Name, Namespace: storage.Status.LustreMgtReference.Namespace, @@ -1084,26 +1084,41 @@ func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nn // - NnfStorages from multiple namespaces create NnfNodeStorages in the same namespace // - Different allocations in an NnfStorage could be targeting the same Rabbit node (e.g., MGS and MDS on the same Rabbit) // - The same Rabbit node could be listed more than once within the same allocation. -func nnfNodeStorageName(storage *nnfv1alpha1.NnfStorage, allocationSetIndex int, i int) string { - return storage.Namespace + "-" + storage.Name + "-" + storage.Spec.AllocationSets[allocationSetIndex].Name + "-" + strconv.Itoa(i) +func nnfNodeStorageName(storage *nnfv1alpha2.NnfStorage, allocationSetIndex int, i int) string { + nodeName := storage.Spec.AllocationSets[allocationSetIndex].Nodes[i].Name + + // If the same Rabbit is listed more than once, the index on the end of the name needs to show + // which instance this is. + duplicateRabbitIndex := 0 + for j, node := range storage.Spec.AllocationSets[allocationSetIndex].Nodes { + if j == i { + break + } + + if node.Name == nodeName { + duplicateRabbitIndex++ + } + } + + return storage.Namespace + "-" + storage.Name + "-" + storage.Spec.AllocationSets[allocationSetIndex].Name + "-" + strconv.Itoa(duplicateRabbitIndex) } // SetupWithManager sets up the controller with the Manager. 
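// A worked example of the revised nnfNodeStorageName above: for an NnfStorage
// default/my-storage (names hypothetical) whose "ost" allocation set lists
// Nodes [rabbit-1, rabbit-2, rabbit-1], the generated names are
//
//	i=0 (rabbit-1): default-my-storage-ost-0 // first occurrence on rabbit-1
//	i=1 (rabbit-2): default-my-storage-ost-0 // first occurrence on rabbit-2
//	i=2 (rabbit-1): default-my-storage-ost-1 // second occurrence on rabbit-1
//
// The trailing index is now the per-node occurrence count rather than the
// position in the Nodes list; the namespace/name pair stays unique because
// each child resource is created in its own Rabbit's namespace.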
func (r *NnfStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ &dwsv1alpha2.ClientMountList{}, - &nnfv1alpha1.NnfNodeStorageList{}, - &nnfv1alpha1.NnfNodeBlockStorageList{}, - &nnfv1alpha1.NnfLustreMGTList{}, - &nnfv1alpha1.NnfStorageProfileList{}, + &nnfv1alpha2.NnfNodeStorageList{}, + &nnfv1alpha2.NnfNodeBlockStorageList{}, + &nnfv1alpha2.NnfLustreMGTList{}, + &nnfv1alpha2.NnfStorageProfileList{}, } maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&nnfv1alpha1.NnfStorage{}). - Watches(&nnfv1alpha1.NnfNodeStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). - Watches(&nnfv1alpha1.NnfNodeBlockStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + For(&nnfv1alpha2.NnfStorage{}). + Watches(&nnfv1alpha2.NnfNodeStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&nnfv1alpha2.NnfNodeBlockStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). Watches(&dwsv1alpha2.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). Complete(r) } diff --git a/internal/controller/nnf_systemconfiguration_controller.go b/internal/controller/nnf_systemconfiguration_controller.go index 18ee40a09..dbb80bb99 100644 --- a/internal/controller/nnf_systemconfiguration_controller.go +++ b/internal/controller/nnf_systemconfiguration_controller.go @@ -38,7 +38,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -220,12 +220,12 @@ func (r *NnfSystemConfigurationReconciler) labelsAndTaints(ctx context.Context, labels = make(map[string]string) } - if _, present := labels[v1alpha1.TaintsAndLabelsCompletedLabel]; present { + if _, present := labels[nnfv1alpha2.TaintsAndLabelsCompletedLabel]; present { continue } taint := &corev1.Taint{ - Key: v1alpha1.RabbitNodeTaintKey, + Key: nnfv1alpha2.RabbitNodeTaintKey, Value: "true", Effect: effect, } @@ -239,7 +239,7 @@ func (r *NnfSystemConfigurationReconciler) labelsAndTaints(ctx context.Context, return false, err } // All passes completed on this node. - labels[v1alpha1.TaintsAndLabelsCompletedLabel] = "true" + labels[nnfv1alpha2.TaintsAndLabelsCompletedLabel] = "true" doUpdate = true node.SetLabels(labels) } else { @@ -252,8 +252,8 @@ func (r *NnfSystemConfigurationReconciler) labelsAndTaints(ctx context.Context, } // Add the label. - if _, present := labels[v1alpha1.RabbitNodeSelectorLabel]; !present { - labels[v1alpha1.RabbitNodeSelectorLabel] = "true" + if _, present := labels[nnfv1alpha2.RabbitNodeSelectorLabel]; !present { + labels[nnfv1alpha2.RabbitNodeSelectorLabel] = "true" doUpdate = true node.SetLabels(labels) } diff --git a/internal/controller/nnf_systemconfiguration_controller_test.go b/internal/controller/nnf_systemconfiguration_controller_test.go index 1ed042537..d78af1654 100644 --- a/internal/controller/nnf_systemconfiguration_controller_test.go +++ b/internal/controller/nnf_systemconfiguration_controller_test.go @@ -21,7 +21,6 @@ package controller import ( "context" - "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -32,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) var _ = Describe("NnfSystemconfigurationController", func() { @@ -80,12 +80,12 @@ var _ = Describe("Adding taints and labels to nodes", func() { var sysCfg *dwsv1alpha2.SystemConfiguration taintNoSchedule := &corev1.Taint{ - Key: v1alpha1.RabbitNodeTaintKey, + Key: nnfv1alpha2.RabbitNodeTaintKey, Value: "true", Effect: corev1.TaintEffectNoSchedule, } taintNoExecute := &corev1.Taint{ - Key: v1alpha1.RabbitNodeTaintKey, + Key: nnfv1alpha2.RabbitNodeTaintKey, Value: "true", Effect: corev1.TaintEffectNoExecute, } @@ -147,8 +147,8 @@ var _ = Describe("Adding taints and labels to nodes", func() { Eventually(func(g Gomega) { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(node), tnode)) labels := tnode.GetLabels() - g.Expect(labels).To(HaveKeyWithValue(v1alpha1.RabbitNodeSelectorLabel, "true")) - g.Expect(labels).To(HaveKeyWithValue(v1alpha1.TaintsAndLabelsCompletedLabel, "true")) + g.Expect(labels).To(HaveKeyWithValue(nnfv1alpha2.RabbitNodeSelectorLabel, "true")) + g.Expect(labels).To(HaveKeyWithValue(nnfv1alpha2.TaintsAndLabelsCompletedLabel, "true")) g.Expect(taints.TaintExists(tnode.Spec.Taints, taintNoSchedule)).To(BeTrue()) g.Expect(taints.TaintExists(tnode.Spec.Taints, taintNoExecute)).To(BeFalse()) }).Should(Succeed(), "verify failed for node %s", node.Name) @@ -165,7 +165,7 @@ var _ = Describe("Adding taints and labels to nodes", func() { // Remove the "cleared" label from node1. Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(node1), node1)) labels := node1.GetLabels() - delete(labels, v1alpha1.TaintsAndLabelsCompletedLabel) + delete(labels, nnfv1alpha2.TaintsAndLabelsCompletedLabel) node1.SetLabels(labels) Expect(k8sClient.Update(context.TODO(), node1)).To(Succeed()) By("verifying node1 is repaired") @@ -195,8 +195,8 @@ var _ = Describe("Adding taints and labels to nodes", func() { tnode := &corev1.Node{} g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(node4), tnode)) labels := tnode.GetLabels() - g.Expect(labels).ToNot(HaveKey(v1alpha1.RabbitNodeSelectorLabel)) - g.Expect(labels).ToNot(HaveKey(v1alpha1.TaintsAndLabelsCompletedLabel)) + g.Expect(labels).ToNot(HaveKey(nnfv1alpha2.RabbitNodeSelectorLabel)) + g.Expect(labels).ToNot(HaveKey(nnfv1alpha2.TaintsAndLabelsCompletedLabel)) g.Expect(taints.TaintExists(tnode.Spec.Taints, taintNoSchedule)).To(BeFalse()) g.Expect(taints.TaintExists(tnode.Spec.Taints, taintNoExecute)).To(BeFalse()) }).Should(Succeed(), "verify failed for node %s", node4.Name) diff --git a/internal/controller/nnf_workflow_controller.go b/internal/controller/nnf_workflow_controller.go index c8b9ed95f..182a8b97b 100644 --- a/internal/controller/nnf_workflow_controller.go +++ b/internal/controller/nnf_workflow_controller.go @@ -47,7 +47,7 @@ import ( "github.com/DataWorkflowServices/dws/utils/dwdparse" "github.com/DataWorkflowServices/dws/utils/updater" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -451,7 +451,7 @@ func (r *NnfWorkflowReconciler) finishSetupState(ctx context.Context, workflow * name, namespace := getStorageReferenceNameFromWorkflowActual(workflow, index) // Check whether 
the NnfStorage has finished creating the storage. - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -501,7 +501,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo // Prepare the provided staging parameter for data-movement. Param is the source/destination value from the #DW copy_in/copy_out directive; based // on the param prefix we determine the storage instance and access requirements for data movement. - prepareStagingArgumentFn := func(param string) (*corev1.ObjectReference, *nnfv1alpha1.NnfAccess, *result, error) { + prepareStagingArgumentFn := func(param string) (*corev1.ObjectReference, *nnfv1alpha2.NnfAccess, *result, error) { var storageReference *corev1.ObjectReference name, _ := splitStagingArgumentIntoNameAndPath(param) @@ -532,7 +532,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo nnfStorageName = indexedResourceName(workflow, parentDwIndex) } - storage := &nnfv1alpha1.NnfStorage{ + storage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfStorageName, Namespace: workflow.Namespace, @@ -544,7 +544,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } storageReference = &corev1.ObjectReference{ - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name(), Name: storage.Name, Namespace: storage.Namespace, } @@ -609,7 +609,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } // Wait for accesses to go ready - for _, access := range []*nnfv1alpha1.NnfAccess{sourceAccess, destAccess} { + for _, access := range []*nnfv1alpha2.NnfAccess{sourceAccess, destAccess} { if access != nil { if err := r.Get(ctx, client.ObjectKeyFromObject(access), access); err != nil { return nil, dwsv1alpha2.NewResourceError("could not get NnfAccess %v", client.ObjectKeyFromObject(access)).WithError(err).WithUserMessage("could not create data movement mount points") @@ -622,9 +622,9 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } // Verify data movement is ready - dmm := &nnfv1alpha1.NnfDataMovementManager{ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha1.DataMovementManagerName, - Namespace: nnfv1alpha1.DataMovementNamespace, + dmm := &nnfv1alpha2.NnfDataMovementManager{ObjectMeta: metav1.ObjectMeta{ + Name: nnfv1alpha2.DataMovementManagerName, + Namespace: nnfv1alpha2.DataMovementNamespace, }} if err := r.Get(ctx, client.ObjectKeyFromObject(dmm), dmm); err != nil { return nil, dwsv1alpha2.NewResourceError("could not get NnfDataMovementManager %v", client.ObjectKeyFromObject(dmm)).WithError(err).WithUserMessage("could not determine data movement readiness") @@ -644,7 +644,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo targetStorageRef = sourceStorage } - targetStorage := &nnfv1alpha1.NnfStorage{ + targetStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: targetStorageRef.Name, Namespace: targetStorageRef.Namespace, @@ -665,7 +665,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo return nil, dwsv1alpha2.NewResourceError("could not get NnfDataMovementProfile %s", indexedResourceName(workflow, index)).WithError(err).WithUserMessage("could not find data movement profile") } dmProfileRef := corev1.ObjectReference{ - Kind: 
reflect.TypeOf(nnfv1alpha1.NnfDataMovementProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfDataMovementProfile{}).Name(), Name: dmProfile.Name, Namespace: dmProfile.Namespace, } @@ -684,17 +684,17 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo for _, node := range nodes { for i := 0; i < node.Count; i++ { - dm := &nnfv1alpha1.NnfDataMovement{ + dm := &nnfv1alpha2.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", indexedResourceName(workflow, index), i), Namespace: node.Name, }, - Spec: nnfv1alpha1.NnfDataMovementSpec{ - Source: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{ + Spec: nnfv1alpha2.NnfDataMovementSpec{ + Source: &nnfv1alpha2.NnfDataMovementSpecSourceDestination{ Path: getRabbitRelativePath(fsType, sourceStorage, sourceAccess, source, node.Name, i), StorageReference: *sourceStorage, }, - Destination: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{ + Destination: &nnfv1alpha2.NnfDataMovementSpecSourceDestination{ Path: getRabbitRelativePath(fsType, destStorage, destAccess, dest, node.Name, i), StorageReference: *destStorage, }, @@ -706,8 +706,8 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo dwsv1alpha2.AddWorkflowLabels(dm, workflow) dwsv1alpha2.AddOwnerLabels(dm, workflow) - nnfv1alpha1.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) - nnfv1alpha1.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) + nnfv1alpha2.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) + nnfv1alpha2.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) addDirectiveIndexLabel(dm, index) log.Info("Creating NNF Data Movement", "name", client.ObjectKeyFromObject(dm).String()) @@ -722,17 +722,17 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo case "lustre": - dm := &nnfv1alpha1.NnfDataMovement{ + dm := &nnfv1alpha2.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), - Namespace: nnfv1alpha1.DataMovementNamespace, + Namespace: nnfv1alpha2.DataMovementNamespace, }, - Spec: nnfv1alpha1.NnfDataMovementSpec{ - Source: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{ + Spec: nnfv1alpha2.NnfDataMovementSpec{ + Source: &nnfv1alpha2.NnfDataMovementSpecSourceDestination{ Path: getRabbitRelativePath(fsType, sourceStorage, sourceAccess, source, "", 0), StorageReference: *sourceStorage, }, - Destination: &nnfv1alpha1.NnfDataMovementSpecSourceDestination{ + Destination: &nnfv1alpha2.NnfDataMovementSpecSourceDestination{ Path: getRabbitRelativePath(fsType, destStorage, destAccess, dest, "", 0), StorageReference: *destStorage, }, @@ -744,8 +744,8 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo dwsv1alpha2.AddWorkflowLabels(dm, workflow) dwsv1alpha2.AddOwnerLabels(dm, workflow) - nnfv1alpha1.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) - nnfv1alpha1.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) + nnfv1alpha2.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) + nnfv1alpha2.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) addDirectiveIndexLabel(dm, index) log.Info("Creating NNF Data Movement", "name", client.ObjectKeyFromObject(dm).String()) @@ -765,10 +765,10 @@ func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workfl // Wait for data movement resources to complete matchingLabels := dwsv1alpha2.MatchingOwner(workflow) - matchingLabels[nnfv1alpha1.DirectiveIndexLabel] = strconv.Itoa(index) - 
matchingLabels[nnfv1alpha1.DataMovementTeardownStateLabel] = string(workflow.Status.State) + matchingLabels[nnfv1alpha2.DirectiveIndexLabel] = strconv.Itoa(index) + matchingLabels[nnfv1alpha2.DataMovementTeardownStateLabel] = string(workflow.Status.State) - dataMovementList := &nnfv1alpha1.NnfDataMovementList{} + dataMovementList := &nnfv1alpha2.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } @@ -780,7 +780,7 @@ func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workfl } for _, dm := range dataMovementList.Items { - if dm.Status.State != nnfv1alpha1.DataMovementConditionTypeFinished { + if dm.Status.State != nnfv1alpha2.DataMovementConditionTypeFinished { return Requeue("pending data movement").withObject(&dm), nil } } @@ -788,7 +788,7 @@ func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workfl // Check results of data movement operations // TODO: Detailed Fail Message? for _, dm := range dataMovementList.Items { - if dm.Status.Status != nnfv1alpha1.DataMovementConditionReasonSuccess { + if dm.Status.Status != nnfv1alpha2.DataMovementConditionReasonSuccess { handleWorkflowErrorByIndex(dwsv1alpha2.NewResourceError("").WithUserMessage( fmt.Sprintf("data movement operation failed during '%s', message: %s", workflow.Status.State, dm.Status.Message)). WithFatal(), workflow, index) @@ -835,7 +835,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() } - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index) + "-computes", Namespace: workflow.Namespace, @@ -871,7 +871,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * // which shares the same name with the NNFStorage. 
Name: name, Namespace: namespace, - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name(), } return ctrl.SetControllerReference(workflow, access, r.Scheme) @@ -901,7 +901,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * if fsType == "gfs2" || fsType == "lustre" { name, namespace := getStorageReferenceNameFromWorkflowActual(workflow, index) - storage := &nnfv1alpha1.NnfStorage{ + storage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -980,15 +980,15 @@ func (r *NnfWorkflowReconciler) startPostRunState(ctx context.Context, workflow // Wait for data movement resources to complete matchingLabels := dwsv1alpha2.MatchingOwner(workflow) - matchingLabels[nnfv1alpha1.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) + matchingLabels[nnfv1alpha2.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) - dataMovementList := &nnfv1alpha1.NnfDataMovementList{} + dataMovementList := &nnfv1alpha2.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } for _, dm := range dataMovementList.Items { - if dm.Status.State != nnfv1alpha1.DataMovementConditionTypeFinished { + if dm.Status.State != nnfv1alpha2.DataMovementConditionTypeFinished { return Requeue("pending data movement").withObject(&dm), nil } } @@ -1025,19 +1025,19 @@ func (r *NnfWorkflowReconciler) finishPostRunState(ctx context.Context, workflow // Any user created copy-offload data movement requests created during run must report any errors to the workflow. // TODO: Customer asked if this could be optional matchingLabels := dwsv1alpha2.MatchingOwner(workflow) - matchingLabels[nnfv1alpha1.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) + matchingLabels[nnfv1alpha2.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) - dataMovementList := &nnfv1alpha1.NnfDataMovementList{} + dataMovementList := &nnfv1alpha2.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } for _, dm := range dataMovementList.Items { - if dm.Status.State != nnfv1alpha1.DataMovementConditionTypeFinished { + if dm.Status.State != nnfv1alpha2.DataMovementConditionTypeFinished { return Requeue("pending data movement").withObject(&dm), nil } - if dm.Status.Status == nnfv1alpha1.DataMovementConditionReasonFailed { + if dm.Status.Status == nnfv1alpha2.DataMovementConditionReasonFailed { handleWorkflowErrorByIndex(dwsv1alpha2.NewResourceError("data movement %v failed", client.ObjectKeyFromObject(&dm)).WithUserMessage("data movement failed").WithFatal(), workflow, index) return Requeue("error").withObject(&dm), nil } @@ -1060,11 +1060,11 @@ func (r *NnfWorkflowReconciler) startTeardownState(ctx context.Context, workflow // copy_in/out directives can reference NnfStorage from a different directive, so all the NnfAccesses // need to be removed first. 
childObjects := []dwsv1alpha2.ObjectList{ - &nnfv1alpha1.NnfDataMovementList{}, - &nnfv1alpha1.NnfAccessList{}, + &nnfv1alpha2.NnfDataMovementList{}, + &nnfv1alpha2.NnfAccessList{}, } - deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha1.DirectiveIndexLabel: strconv.Itoa(index)}) + deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha2.DirectiveIndexLabel: strconv.Itoa(index)}) if err != nil { return nil, dwsv1alpha2.NewResourceError("could not delete NnfDataMovement and NnfAccess children").WithError(err).WithUserMessage("could not stop data movement and unmount file systems") } @@ -1101,7 +1101,7 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo persistentStorage.SetOwnerReferences([]metav1.OwnerReference{}) dwsv1alpha2.RemoveOwnerLabels(persistentStorage) labels := persistentStorage.GetLabels() - delete(labels, nnfv1alpha1.DirectiveIndexLabel) + delete(labels, nnfv1alpha2.DirectiveIndexLabel) persistentStorage.SetLabels(labels) err = r.Update(ctx, persistentStorage) @@ -1158,11 +1158,11 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo } childObjects := []dwsv1alpha2.ObjectList{ - &nnfv1alpha1.NnfStorageList{}, + &nnfv1alpha2.NnfStorageList{}, &dwsv1alpha2.PersistentStorageInstanceList{}, } - deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha1.DirectiveIndexLabel: strconv.Itoa(index)}) + deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha2.DirectiveIndexLabel: strconv.Itoa(index)}) if err != nil { return nil, dwsv1alpha2.NewResourceError("could not delete NnfStorage and PersistentStorageInstance children").WithError(err).WithUserMessage("could not delete storage allocations") } @@ -1177,9 +1177,9 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo // SetupWithManager sets up the controller with the Manager. func (r *NnfWorkflowReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ - &nnfv1alpha1.NnfDataMovementList{}, - &nnfv1alpha1.NnfAccessList{}, - &nnfv1alpha1.NnfStorageList{}, + &nnfv1alpha2.NnfDataMovementList{}, + &nnfv1alpha2.NnfAccessList{}, + &nnfv1alpha2.NnfStorageList{}, &dwsv1alpha2.PersistentStorageInstanceList{}, &dwsv1alpha2.DirectiveBreakdownList{}, } @@ -1188,10 +1188,10 @@ func (r *NnfWorkflowReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). For(&dwsv1alpha2.Workflow{}). - Owns(&nnfv1alpha1.NnfAccess{}). + Owns(&nnfv1alpha2.NnfAccess{}). Owns(&dwsv1alpha2.DirectiveBreakdown{}). Owns(&dwsv1alpha2.PersistentStorageInstance{}). - Watches(&nnfv1alpha1.NnfDataMovement{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). - Watches(&nnfv1alpha1.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&nnfv1alpha2.NnfDataMovement{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&nnfv1alpha2.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). 
Complete(r) } diff --git a/internal/controller/nnf_workflow_controller_container_helpers.go b/internal/controller/nnf_workflow_controller_container_helpers.go index 4b40ebb95..6647d4404 100644 --- a/internal/controller/nnf_workflow_controller_container_helpers.go +++ b/internal/controller/nnf_workflow_controller_container_helpers.go @@ -26,8 +26,7 @@ import ( "strings" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/go-logr/logr" mpicommonv1 "github.com/kubeflow/common/pkg/apis/common/v1" mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" @@ -43,7 +42,7 @@ import ( type nnfUserContainer struct { workflow *dwsv1alpha2.Workflow - profile *nnfv1alpha1.NnfContainerProfile + profile *nnfv1alpha2.NnfContainerProfile nnfNodes []string volumes []nnfContainerVolume username string @@ -78,7 +77,7 @@ func (c *nnfUserContainer) createMPIJob() error { } c.profile.Data.MPISpec.DeepCopyInto(&mpiJob.Spec) - c.username = nnfv1alpha1.ContainerMPIUser + c.username = nnfv1alpha2.ContainerMPIUser if err := c.applyLabels(&mpiJob.ObjectMeta); err != nil { return err @@ -251,10 +250,10 @@ func (c *nnfUserContainer) applyLabels(job metav1.Object) error { dwsv1alpha2.AddWorkflowLabels(job, c.workflow) labels := job.GetLabels() - labels[nnfv1alpha1.ContainerLabel] = c.workflow.Name - labels[nnfv1alpha1.PinnedContainerProfileLabelName] = c.profile.GetName() - labels[nnfv1alpha1.PinnedContainerProfileLabelNameSpace] = c.profile.GetNamespace() - labels[nnfv1alpha1.DirectiveIndexLabel] = strconv.Itoa(c.index) + labels[nnfv1alpha2.ContainerLabel] = c.workflow.Name + labels[nnfv1alpha2.PinnedContainerProfileLabelName] = c.profile.GetName() + labels[nnfv1alpha2.PinnedContainerProfileLabelNameSpace] = c.profile.GetNamespace() + labels[nnfv1alpha2.DirectiveIndexLabel] = strconv.Itoa(c.index) job.SetLabels(labels) if err := ctrl.SetControllerReference(c.workflow, job, c.scheme); err != nil { @@ -267,7 +266,7 @@ func (c *nnfUserContainer) applyLabels(job metav1.Object) error { func (c *nnfUserContainer) applyTolerations(spec *corev1.PodSpec) { spec.Tolerations = append(spec.Tolerations, corev1.Toleration{ Effect: corev1.TaintEffectNoSchedule, - Key: v1alpha1.RabbitNodeTaintKey, + Key: nnfv1alpha2.RabbitNodeTaintKey, Operator: corev1.TolerationOpEqual, Value: "true", }) @@ -441,7 +440,7 @@ func (c *nnfUserContainer) getHostPorts() ([]uint16, error) { // Get the ports from the port manager for this workflow for _, alloc := range pm.Status.Allocations { - if alloc.Requester != nil && alloc.Requester.UID == c.workflow.UID && alloc.Status == nnfv1alpha1.NnfPortManagerAllocationStatusInUse { + if alloc.Requester != nil && alloc.Requester.UID == c.workflow.UID && alloc.Status == nnfv1alpha2.NnfPortManagerAllocationStatusInUse { ports = append(ports, alloc.Ports...) 
} } diff --git a/internal/controller/nnf_workflow_controller_helpers.go b/internal/controller/nnf_workflow_controller_helpers.go index b3c357591..891fcaf83 100644 --- a/internal/controller/nnf_workflow_controller_helpers.go +++ b/internal/controller/nnf_workflow_controller_helpers.go @@ -34,7 +34,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/dwdparse" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/go-logr/logr" mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" @@ -554,8 +554,8 @@ func (r *NnfWorkflowReconciler) validateServerAllocations(ctx context.Context, d } -func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow *dwsv1alpha2.Workflow, s *dwsv1alpha2.Servers, index int, log logr.Logger) (*nnfv1alpha1.NnfStorage, error) { - nnfStorage := &nnfv1alpha1.NnfStorage{ +func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow *dwsv1alpha2.Workflow, s *dwsv1alpha2.Servers, index int, log logr.Logger) (*nnfv1alpha2.NnfStorage, error) { + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: s.Name, Namespace: s.Namespace, @@ -644,11 +644,11 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * } // Need to remove all of the AllocationSets in the NnfStorage object before we begin - nnfStorage.Spec.AllocationSets = []nnfv1alpha1.NnfStorageAllocationSetSpec{} + nnfStorage.Spec.AllocationSets = []nnfv1alpha2.NnfStorageAllocationSetSpec{} // Iterate the Servers data elements to pull out the allocation sets for the server for i := range s.Spec.AllocationSets { - nnfAllocSet := nnfv1alpha1.NnfStorageAllocationSetSpec{} + nnfAllocSet := nnfv1alpha2.NnfStorageAllocationSetSpec{} nnfAllocSet.Name = s.Spec.AllocationSets[i].Label nnfAllocSet.Capacity = s.Spec.AllocationSets[i].AllocationSize @@ -668,12 +668,12 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * // If there are multiple allocations on the first MGTMDT node, split it out into two separate // node entries. The first is a single allocation that will be used for the MGTMDT. The remaining // allocations on the node will be MDTs only.
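// Illustrative sketch of the split described above (node name and counts are
// hypothetical): a first MGTMDT node reported with AllocationCount=3 ends up
// as two entries, one allocation for the combined MGT/MDT and the remaining
// two as MDT-only allocations:
//
//	nnfAllocSet.Nodes = []nnfv1alpha2.NnfStorageAllocationNodes{
//		{Name: "rabbit-node-1", Count: 1}, // MGTMDT
//		{Name: "rabbit-node-1", Count: 2}, // MDTs only
//	}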
- node := nnfv1alpha1.NnfStorageAllocationNodes{Name: storage.Name, Count: 1} + node := nnfv1alpha2.NnfStorageAllocationNodes{Name: storage.Name, Count: 1} nnfAllocSet.Nodes = append(nnfAllocSet.Nodes, node) - node = nnfv1alpha1.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount - 1} + node = nnfv1alpha2.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount - 1} nnfAllocSet.Nodes = append(nnfAllocSet.Nodes, node) } else { - node := nnfv1alpha1.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount} + node := nnfv1alpha2.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount} nnfAllocSet.Nodes = append(nnfAllocSet.Nodes, node) } } @@ -703,7 +703,7 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool string) (corev1.ObjectReference, string, error) { persistentStorageList := &dwsv1alpha2.PersistentStorageInstanceList{} - if err := r.List(ctx, persistentStorageList, client.MatchingLabels(map[string]string{nnfv1alpha1.StandaloneMGTLabel: pool})); err != nil { + if err := r.List(ctx, persistentStorageList, client.MatchingLabels(map[string]string{nnfv1alpha2.StandaloneMGTLabel: pool})); err != nil { return corev1.ObjectReference{}, "", err } @@ -715,7 +715,7 @@ func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool s healthyMgts := make(map[string]corev1.ObjectReference) for _, persistentStorage := range persistentStorageList.Items { // Find the NnfStorage for the PersistentStorage so we can check its status and get the MGT LNid - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: persistentStorage.Name, Namespace: persistentStorage.Namespace, @@ -810,14 +810,14 @@ func (r *NnfWorkflowReconciler) findLustreFileSystemForPath(ctx context.Context, return nil } -func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, storage *nnfv1alpha1.NnfStorage, workflow *dwsv1alpha2.Workflow, index int, parentDwIndex int, teardownState dwsv1alpha2.WorkflowState, log logr.Logger) (*nnfv1alpha1.NnfAccess, error) { +func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, storage *nnfv1alpha2.NnfStorage, workflow *dwsv1alpha2.Workflow, index int, parentDwIndex int, teardownState dwsv1alpha2.WorkflowState, log logr.Logger) (*nnfv1alpha2.NnfAccess, error) { pinnedName, pinnedNamespace := getStorageReferenceNameFromWorkflowActual(workflow, parentDwIndex) nnfStorageProfile, err := findPinnedProfile(ctx, r.Client, pinnedNamespace, pinnedName) if err != nil { return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() } - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, parentDwIndex) + "-servers", Namespace: workflow.Namespace, @@ -830,9 +830,9 @@ func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st dwsv1alpha2.AddOwnerLabels(access, workflow) addPinnedStorageProfileLabel(access, nnfStorageProfile) addDirectiveIndexLabel(access, index) - nnfv1alpha1.AddDataMovementTeardownStateLabel(access, teardownState) + nnfv1alpha2.AddDataMovementTeardownStateLabel(access, teardownState) - access.Spec = nnfv1alpha1.NnfAccessSpec{ + access.Spec = 
nnfv1alpha2.NnfAccessSpec{ DesiredState: "mounted", TeardownState: teardownState, Target: "all", @@ -844,7 +844,7 @@ func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st // NNF Storage is Namespaced Name to the servers object StorageReference: corev1.ObjectReference{ - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name(), Name: storage.Name, Namespace: storage.Namespace, }, @@ -873,7 +873,7 @@ func (r *NnfWorkflowReconciler) getDirectiveFileSystemType(ctx context.Context, return dwArgs["type"], nil case "persistentdw": name, namespace := getStorageReferenceNameFromWorkflowActual(workflow, index) - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -1046,10 +1046,10 @@ func splitStagingArgumentIntoNameAndPath(arg string) (string, string) { return name, path } -func getRabbitRelativePath(fsType string, storageRef *corev1.ObjectReference, access *nnfv1alpha1.NnfAccess, path, namespace string, index int) string { +func getRabbitRelativePath(fsType string, storageRef *corev1.ObjectReference, access *nnfv1alpha2.NnfAccess, path, namespace string, index int) string { relPath := path - if storageRef.Kind == reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name() { + if storageRef.Kind == reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name() { switch fsType { case "xfs", "gfs2": idxMount := getIndexMountDir(namespace, index) @@ -1115,7 +1115,7 @@ func addDirectiveIndexLabel(object metav1.Object, index int) { labels = make(map[string]string) } - labels[nnfv1alpha1.DirectiveIndexLabel] = strconv.Itoa(index) + labels[nnfv1alpha2.DirectiveIndexLabel] = strconv.Itoa(index) object.SetLabels(labels) } @@ -1125,7 +1125,7 @@ func getDirectiveIndexLabel(object metav1.Object) string { return "" } - return labels[nnfv1alpha1.DirectiveIndexLabel] + return labels[nnfv1alpha2.DirectiveIndexLabel] } func setTargetOwnerUIDLabel(object metav1.Object, value string) { @@ -1134,7 +1134,7 @@ func setTargetOwnerUIDLabel(object metav1.Object, value string) { labels = make(map[string]string) } - labels[nnfv1alpha1.TargetOwnerUidLabel] = value + labels[nnfv1alpha2.TargetOwnerUidLabel] = value object.SetLabels(labels) } @@ -1144,7 +1144,7 @@ func getTargetOwnerUIDLabel(object metav1.Object) string { return "" } - return labels[nnfv1alpha1.TargetOwnerUidLabel] + return labels[nnfv1alpha2.TargetOwnerUidLabel] } func setTargetDirectiveIndexLabel(object metav1.Object, value string) { @@ -1153,7 +1153,7 @@ func setTargetDirectiveIndexLabel(object metav1.Object, value string) { labels = make(map[string]string) } - labels[nnfv1alpha1.TargetDirectiveIndexLabel] = value + labels[nnfv1alpha2.TargetDirectiveIndexLabel] = value object.SetLabels(labels) } @@ -1163,7 +1163,7 @@ func getTargetDirectiveIndexLabel(object metav1.Object) string { return "" } - return labels[nnfv1alpha1.TargetDirectiveIndexLabel] + return labels[nnfv1alpha2.TargetDirectiveIndexLabel] } func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int, accessSuffix string) (*result, error) { @@ -1171,7 +1171,7 @@ func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, panic(fmt.Sprint("unhandled NnfAccess suffix", accessSuffix)) } - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index) + "-" + accessSuffix, Namespace: 
workflow.Namespace, @@ -1185,7 +1185,7 @@ func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, return nil, client.IgnoreNotFound(err) } - teardownState, found := access.Labels[nnfv1alpha1.DataMovementTeardownStateLabel] + teardownState, found := access.Labels[nnfv1alpha2.DataMovementTeardownStateLabel] if !found || dwsv1alpha2.WorkflowState(teardownState) == workflow.Status.State { if access.Spec.DesiredState != "unmounted" { access.Spec.DesiredState = "unmounted" @@ -1224,7 +1224,7 @@ func (r *NnfWorkflowReconciler) waitForNnfAccessStateAndReady(ctx context.Contex for _, suffix := range accessSuffixes { - access := &nnfv1alpha1.NnfAccess{ + access := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index) + suffix, Namespace: workflow.Namespace, @@ -1249,7 +1249,7 @@ func (r *NnfWorkflowReconciler) waitForNnfAccessStateAndReady(ctx context.Contex } else { // When unmounting, we are conditionally dependent on the workflow state matching the // state of the teardown label, if found. - teardownState, found := access.Labels[nnfv1alpha1.DataMovementTeardownStateLabel] + teardownState, found := access.Labels[nnfv1alpha2.DataMovementTeardownStateLabel] if !found || dwsv1alpha2.WorkflowState(teardownState) == workflow.Status.State { if access.Status.State != "unmounted" || !access.Status.Ready { return Requeue("pending unmount").withObject(access), nil @@ -1363,7 +1363,7 @@ func (r *NnfWorkflowReconciler) userContainerHandler(ctx context.Context, workfl profile: profile, nnfNodes: nnfNodes, volumes: volumes, - username: nnfv1alpha1.ContainerUser, + username: nnfv1alpha2.ContainerUser, uid: int64(workflow.Spec.UserID), gid: int64(workflow.Spec.GroupID), index: index, @@ -1400,7 +1400,7 @@ func (r *NnfWorkflowReconciler) createContainerService(ctx context.Context, work } service.Spec.Selector = map[string]string{ - nnfv1alpha1.ContainerLabel: workflow.Name, + nnfv1alpha2.ContainerLabel: workflow.Name, } service.Spec.ClusterIP = corev1.ClusterIPNone @@ -1611,7 +1611,7 @@ func (r *NnfWorkflowReconciler) deleteContainers(ctx context.Context, workflow * // Add workflow matchLabels + directive index (if desired) matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) if index >= 0 { - matchLabels[nnfv1alpha1.DirectiveIndexLabel] = strconv.Itoa(index) + matchLabels[nnfv1alpha2.DirectiveIndexLabel] = strconv.Itoa(index) } // Delete MPIJobs @@ -1879,7 +1879,7 @@ func (r *NnfWorkflowReconciler) getMPIJobs(ctx context.Context, workflow *dwsv1a // Get the MPIJobs for this workflow and directive index matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) if index >= 0 { - matchLabels[nnfv1alpha1.DirectiveIndexLabel] = strconv.Itoa(index) + matchLabels[nnfv1alpha2.DirectiveIndexLabel] = strconv.Itoa(index) } jobList := &mpiv2beta1.MPIJobList{} @@ -1894,7 +1894,7 @@ func (r *NnfWorkflowReconciler) getContainerJobs(ctx context.Context, workflow * // Get the jobs for this workflow and directive index matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) if index >= 0 { - matchLabels[nnfv1alpha1.DirectiveIndexLabel] = strconv.Itoa(index) + matchLabels[nnfv1alpha2.DirectiveIndexLabel] = strconv.Itoa(index) } jobList := &batchv1.JobList{} @@ -1906,7 +1906,7 @@ func (r *NnfWorkflowReconciler) getContainerJobs(ctx context.Context, workflow * } // Create a list of volumes to be mounted inside of the containers based on the DW_JOB/DW_PERSISTENT arguments -func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflow 
*dwsv1alpha2.Workflow, dwArgs map[string]string, profile *nnfv1alpha1.NnfContainerProfile) ([]nnfContainerVolume, *result, error) { +func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflow *dwsv1alpha2.Workflow, dwArgs map[string]string, profile *nnfv1alpha2.NnfContainerProfile) ([]nnfContainerVolume, *result, error) { volumes := []nnfContainerVolume{} for arg, val := range dwArgs { @@ -1972,7 +1972,7 @@ func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflo return nil, nil, dwsv1alpha2.NewResourceError("could not retrieve the directive breakdown for '%s'", vol.directiveName).WithMajor() } - nnfAccess := &nnfv1alpha1.NnfAccess{ + nnfAccess := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Name + "-" + strconv.Itoa(vol.directiveIndex) + "-servers", Namespace: workflow.Namespace, @@ -2017,7 +2017,7 @@ func (r *NnfWorkflowReconciler) getContainerPorts(ctx context.Context, workflow // Add a port allocation request to the manager for the number of ports specified by the // container profile - pm.Spec.Allocations = append(pm.Spec.Allocations, nnfv1alpha1.NnfPortManagerAllocationSpec{ + pm.Spec.Allocations = append(pm.Spec.Allocations, nnfv1alpha2.NnfPortManagerAllocationSpec{ Requester: corev1.ObjectReference{ Name: workflow.Name, Namespace: workflow.Namespace, @@ -2058,14 +2058,14 @@ func (r *NnfWorkflowReconciler) checkContainerPorts(ctx context.Context, workflo for _, alloc := range pm.Status.Allocations { if alloc.Requester != nil && alloc.Requester.UID == workflow.UID { - if alloc.Status == nnfv1alpha1.NnfPortManagerAllocationStatusInUse && len(alloc.Ports) == int(profile.Data.NumPorts) { + if alloc.Status == nnfv1alpha2.NnfPortManagerAllocationStatusInUse && len(alloc.Ports) == int(profile.Data.NumPorts) { // Add workflow env var for the ports name, val := getContainerPortsEnvVar(alloc.Ports) workflow.Status.Env[name] = val return nil, nil // done - } else if alloc.Status == nnfv1alpha1.NnfPortManagerAllocationStatusInvalidConfiguration { + } else if alloc.Status == nnfv1alpha2.NnfPortManagerAllocationStatusInvalidConfiguration { return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("could not request ports for container workflow: Invalid NnfPortManager configuration").WithFatal().WithUser() - } else if alloc.Status == nnfv1alpha1.NnfPortManagerAllocationStatusInsufficientResources { + } else if alloc.Status == nnfv1alpha2.NnfPortManagerAllocationStatusInsufficientResources { return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("could not request ports for container workflow: InsufficientResources").WithFatal() } } @@ -2079,11 +2079,11 @@ func (r *NnfWorkflowReconciler) checkContainerPorts(ctx context.Context, workflo // Retrieve the default NnfPortManager for user containers. Allow a client to be passed in as this // is meant to be used by reconcilers or container helpers. 
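// Hedged usage sketch for the function below (caller shape and error handling
// are illustrative; the environment variables are the ones read in the body):
//
//	pm, err := getContainerPortManager(ctx, r.Client)
//	if err != nil {
//		return nil, err
//	}
//	for _, alloc := range pm.Status.Allocations {
//		// match alloc.Requester.UID against the workflow UID, as
//		// getHostPorts() and checkContainerPorts() do above
//	}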
-func getContainerPortManager(ctx context.Context, cl client.Client) (*nnfv1alpha1.NnfPortManager, error) { +func getContainerPortManager(ctx context.Context, cl client.Client) (*nnfv1alpha2.NnfPortManager, error) { portManagerName := os.Getenv("NNF_PORT_MANAGER_NAME") portManagerNamespace := os.Getenv("NNF_PORT_MANAGER_NAMESPACE") - pm := &nnfv1alpha1.NnfPortManager{ + pm := &nnfv1alpha2.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: portManagerName, Namespace: portManagerNamespace, @@ -2112,7 +2112,7 @@ func (r *NnfWorkflowReconciler) releaseContainerPorts(ctx context.Context, workf // Find the allocation in the Status for _, alloc := range pm.Status.Allocations { - if alloc.Requester.UID == workflow.UID && alloc.Status == nnfv1alpha1.NnfPortManagerAllocationStatusInUse { + if alloc.Requester.UID == workflow.UID && alloc.Status == nnfv1alpha2.NnfPortManagerAllocationStatusInUse { found = true break } diff --git a/internal/controller/nnf_workflow_controller_helpers_test.go b/internal/controller/nnf_workflow_controller_helpers_test.go index 065ad1bb2..476b5af03 100644 --- a/internal/controller/nnf_workflow_controller_helpers_test.go +++ b/internal/controller/nnf_workflow_controller_helpers_test.go @@ -4,7 +4,7 @@ import ( "reflect" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -50,13 +50,13 @@ var _ = Describe("NnfWorkflowControllerHelpers", func() { DescribeTable("Test NNF filesystems (NnfAccess)", func(fsType, path, output string) { // We can hardwire these fields and assume the same mountpath/mountpathprefix, index, namespace, etc - objRef := corev1.ObjectReference{Kind: reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name()} + objRef := corev1.ObjectReference{Kind: reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name()} mntPath := "/mnt/nnf/123456-0/" idx := 0 ns := "slushy44" - access := nnfv1alpha1.NnfAccess{ - Spec: nnfv1alpha1.NnfAccessSpec{ + access := nnfv1alpha2.NnfAccess{ + Spec: nnfv1alpha2.NnfAccessSpec{ MountPath: mntPath, MountPathPrefix: mntPath, }, diff --git a/internal/controller/nnf_workflow_controller_test.go b/internal/controller/nnf_workflow_controller_test.go index ab2994ac3..8bceaab47 100644 --- a/internal/controller/nnf_workflow_controller_test.go +++ b/internal/controller/nnf_workflow_controller_test.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -41,7 +41,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) var ( @@ -58,9 +58,9 @@ var _ = Describe("NNF Workflow Unit Tests", func() { key types.NamespacedName workflow *dwsv1alpha2.Workflow setup sync.Once - storageProfile *nnfv1alpha1.NnfStorageProfile - dmProfile *nnfv1alpha1.NnfDataMovementProfile - nnfNode *nnfv1alpha1.NnfNode + storageProfile *nnfv1alpha2.NnfStorageProfile + dmProfile *nnfv1alpha2.NnfDataMovementProfile + nnfNode *nnfv1alpha2.NnfNode namespace *corev1.Namespace persistentStorageName string ) @@ -73,14 +73,14 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), namespace)).To(Succeed()) - nnfNode = &nnfv1alpha1.NnfNode{ + nnfNode = &nnfv1alpha2.NnfNode{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: "rabbit-node", }, - Spec: nnfv1alpha1.NnfNodeSpec{ - State: nnfv1alpha1.ResourceEnable, + Spec: nnfv1alpha2.NnfNodeSpec{ + State: nnfv1alpha2.ResourceEnable, }, } Expect(k8sClient.Create(context.TODO(), nnfNode)).To(Succeed()) @@ -145,13 +145,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }).ShouldNot(Succeed()) Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha1.NnfStorageProfile{} + profExpected := &nnfv1alpha2.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) Expect(k8sClient.Delete(context.TODO(), dmProfile)).To(Succeed()) - dmProfExpected := &nnfv1alpha1.NnfDataMovementProfile{} + dmProfExpected := &nnfv1alpha2.NnfDataMovementProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dmProfile), dmProfExpected) }).ShouldNot(Succeed()) @@ -192,15 +192,15 @@ var _ = Describe("NNF Workflow Unit Tests", func() { // operate. // An alternative is to create a workflow with 'create_persistent' // as its directive and actually create the full-blown persistent instance.. 
(painful) - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: workflow.Namespace, }, - Spec: nnfv1alpha1.NnfStorageSpec{ + Spec: nnfv1alpha2.NnfStorageSpec{ FileSystemType: fsType, - AllocationSets: []nnfv1alpha1.NnfStorageAllocationSetSpec{}, + AllocationSets: []nnfv1alpha2.NnfStorageAllocationSetSpec{}, }, } Expect(k8sClient.Create(context.TODO(), nnfStorage)).To(Succeed()) @@ -214,7 +214,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(psi), psi)).To(Succeed()) Expect(k8sClient.Delete(context.TODO(), psi)).Should(Succeed()) - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: workflow.Namespace}, } @@ -263,7 +263,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("More than one default profile", func() { - var storageProfile2 *nnfv1alpha1.NnfStorageProfile + var storageProfile2 *nnfv1alpha2.NnfStorageProfile BeforeEach(func() { // The second profile will get a different name via the call to uuid. @@ -273,7 +273,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), storageProfile2)).To(Succeed()) - profExpected := &nnfv1alpha1.NnfStorageProfile{} + profExpected := &nnfv1alpha2.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile2), profExpected) }).ShouldNot(Succeed()) @@ -301,7 +301,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("Positive tests for storage profiles", func() { - profiles := []*nnfv1alpha1.NnfStorageProfile{} + profiles := []*nnfv1alpha2.NnfStorageProfile{} profNames := []string{} BeforeEach(func() { @@ -436,7 +436,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("More than one default profile", func() { - var dmProfile2 *nnfv1alpha1.NnfDataMovementProfile + var dmProfile2 *nnfv1alpha2.NnfDataMovementProfile BeforeEach(func() { // The second profile will get a different name via the call to uuid. @@ -446,7 +446,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), dmProfile2)).To(Succeed()) - profExpected := &nnfv1alpha1.NnfDataMovementProfile{} + profExpected := &nnfv1alpha2.NnfDataMovementProfile{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dmProfile2), profExpected) }).ShouldNot(Succeed()) @@ -474,7 +474,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("Positive tests for data movement profiles", func() { - profiles := []*nnfv1alpha1.NnfDataMovementProfile{} + profiles := []*nnfv1alpha2.NnfDataMovementProfile{} profNames := []string{} var lustre *lusv1beta1.LustreFileSystem @@ -665,7 +665,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("Using copy_in directives", func() { var ( - dmm *nnfv1alpha1.NnfDataMovementManager + dmm *nnfv1alpha2.NnfDataMovementManager ) JustBeforeEach(func() { @@ -693,18 +693,18 @@ var _ = Describe("NNF Workflow Unit Tests", func() { BeforeEach(func() { ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha1.DataMovementNamespace, + Name: nnfv1alpha2.DataMovementNamespace, }, } k8sClient.Create(context.TODO(), ns) // Ignore errors as namespace may be created from other tests - dmm = &nnfv1alpha1.NnfDataMovementManager{ + dmm = &nnfv1alpha2.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha1.DataMovementManagerName, - Namespace: nnfv1alpha1.DataMovementNamespace, + Name: nnfv1alpha2.DataMovementManagerName, + Namespace: nnfv1alpha2.DataMovementNamespace, }, - Spec: nnfv1alpha1.NnfDataMovementManagerSpec{ + Spec: nnfv1alpha2.NnfDataMovementManagerSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -714,7 +714,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }, }, }, - Status: nnfv1alpha1.NnfDataMovementManagerStatus{ + Status: nnfv1alpha2.NnfDataMovementManagerStatus{ Ready: true, }, } @@ -766,10 +766,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }).Should(Succeed(), "update to DataIn") By("creates the data movement resource") - dm := &nnfv1alpha1.NnfDataMovement{ + dm := &nnfv1alpha2.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", workflow.Name, 1), - Namespace: nnfv1alpha1.DataMovementNamespace, + Namespace: nnfv1alpha2.DataMovementNamespace, }, } @@ -790,19 +790,19 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(dm.Spec.Destination.StorageReference).ToNot(BeNil()) Expect(dm.Spec.Destination.StorageReference).To(MatchFields(IgnoreExtras, Fields{ - "Kind": Equal(reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name()), "Name": Equal(fmt.Sprintf("%s-%d", workflow.Name, 0)), "Namespace": Equal(workflow.Namespace), })) Expect(dm.Spec.ProfileReference).To(MatchFields(IgnoreExtras, Fields{ - "Kind": Equal(reflect.TypeOf(nnfv1alpha1.NnfDataMovementProfile{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha2.NnfDataMovementProfile{}).Name()), "Name": Equal(indexedResourceName(workflow, 1)), "Namespace": Equal(corev1.NamespaceDefault), }, )) - Expect(dm.GetLabels()[nnfv1alpha1.DataMovementInitiatorLabel]).To(Equal("copy_in")) + Expect(dm.GetLabels()[nnfv1alpha2.DataMovementInitiatorLabel]).To(Equal("copy_in")) }) }) @@ -846,10 +846,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "transition desired state to DataIn") - dm := &nnfv1alpha1.NnfDataMovement{ + dm := &nnfv1alpha2.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, 1), - Namespace: nnfv1alpha1.DataMovementNamespace, + Namespace: nnfv1alpha2.DataMovementNamespace, }, } @@ -870,18 +870,18 @@ var 
_ = Describe("NNF Workflow Unit Tests", func() { Expect(dm.Spec.Destination.StorageReference).ToNot(BeNil()) Expect(dm.Spec.Destination.StorageReference).To(MatchFields(IgnoreExtras, Fields{ - "Kind": Equal(reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name()), "Name": Equal(persistentStorageName), "Namespace": Equal(workflow.Namespace), })) Expect(dm.Spec.ProfileReference).To(MatchFields(IgnoreExtras, Fields{ - "Kind": Equal(reflect.TypeOf(nnfv1alpha1.NnfDataMovementProfile{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha2.NnfDataMovementProfile{}).Name()), "Name": Equal(indexedResourceName(workflow, 1)), "Namespace": Equal(corev1.NamespaceDefault), }, )) - Expect(dm.GetLabels()[nnfv1alpha1.DataMovementInitiatorLabel]).To(Equal("copy_in")) + Expect(dm.GetLabels()[nnfv1alpha2.DataMovementInitiatorLabel]).To(Equal("copy_in")) }) }) @@ -1283,7 +1283,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateSetup }).Should(BeTrue(), "waiting for ready after setup") - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: servers.Name, Namespace: servers.Namespace, @@ -1365,8 +1365,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { createGlobalLustre bool globalLustre *lusv1beta1.LustreFileSystem - containerProfile *nnfv1alpha1.NnfContainerProfile - containerProfileStorages []nnfv1alpha1.NnfContainerProfileStorage + containerProfile *nnfv1alpha2.NnfContainerProfile + containerProfileStorages []nnfv1alpha2.NnfContainerProfileStorage createContainerProfile bool ) @@ -1503,7 +1503,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Context("when an optional storage in the container profile is not present in the container arguments", func() { BeforeEach(func() { - containerProfileStorages = []nnfv1alpha1.NnfContainerProfileStorage{ + containerProfileStorages = []nnfv1alpha2.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: false}, {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, {Name: "DW_GLOBAL_foo_global_lustre", Optional: true}, @@ -1548,7 +1548,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Context("when a required storage in the container profile is not present in the arguments", func() { BeforeEach(func() { - containerProfileStorages = []nnfv1alpha1.NnfContainerProfileStorage{ + containerProfileStorages = []nnfv1alpha2.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: false}, {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, } @@ -1592,7 +1592,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { } }) - buildContainerProfile := func(storages []nnfv1alpha1.NnfContainerProfileStorage) { + buildContainerProfile := func(storages []nnfv1alpha2.NnfContainerProfileStorage) { By("Creating a profile with specific storages") tempProfile := basicNnfContainerProfile("restricted-"+uuid.NewString()[:8], storages) containerProfile = createNnfContainerProfile(tempProfile, true) @@ -1609,7 +1609,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { } DescribeTable("should not go to Proposal Ready", - func(argIdx int, storages []nnfv1alpha1.NnfContainerProfileStorage) { + func(argIdx int, storages []nnfv1alpha2.NnfContainerProfileStorage) { buildContainerProfile(storages) buildContainerWorkflowWithArgs(storageArgsList[argIdx]) Eventually(func(g Gomega) bool { @@ -1620,19 +1620,19 @@ var _ = Describe("NNF 
Workflow Unit Tests", func() { }, Entry("when DW_JOB_ not present in the container profile", 0, - []nnfv1alpha1.NnfContainerProfileStorage{ + []nnfv1alpha2.NnfContainerProfileStorage{ {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, {Name: "DW_GLOBAL_foo_global_lustre", Optional: true}, }, ), Entry("when DW_PERSISTENT_ not present in the container profile", 1, - []nnfv1alpha1.NnfContainerProfileStorage{ + []nnfv1alpha2.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: true}, {Name: "DW_GLOBAL_foo_global_lustre", Optional: true}, }, ), Entry("when DW_GLOBAL_ not present in the container profile", 2, - []nnfv1alpha1.NnfContainerProfileStorage{ + []nnfv1alpha2.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: true}, {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, }, @@ -1738,7 +1738,7 @@ var _ = Describe("NnfStorageProfile Webhook test", func() { }) }) -func WaitForDMMReady(dmm *nnfv1alpha1.NnfDataMovementManager) { +func WaitForDMMReady(dmm *nnfv1alpha2.NnfDataMovementManager) { Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dmm), dmm)).To(Succeed()) if !dmm.Status.Ready { diff --git a/internal/controller/nnfcontainerprofile_helpers.go b/internal/controller/nnfcontainerprofile_helpers.go index 7dd5c5f27..8a89c7144 100644 --- a/internal/controller/nnfcontainerprofile_helpers.go +++ b/internal/controller/nnfcontainerprofile_helpers.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -32,11 +32,11 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/dwdparse" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/go-logr/logr" ) -func getContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha1.NnfContainerProfile, error) { +func getContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha2.NnfContainerProfile, error) { profile, err := findPinnedContainerProfile(ctx, clnt, workflow, index) if err != nil { return nil, err @@ -49,8 +49,8 @@ func getContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv return profile, nil } -func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha1.NnfContainerProfile, error) { - profile := &nnfv1alpha1.NnfContainerProfile{ +func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha2.NnfContainerProfile, error) { + profile := &nnfv1alpha2.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), Namespace: workflow.Namespace, @@ -68,7 +68,7 @@ func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflo return profile, nil } -func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha1.NnfContainerProfile, error) { +func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha2.NnfContainerProfile, error) { args, err := 
dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) if err != nil { return nil, err @@ -79,7 +79,7 @@ func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dws return nil, fmt.Errorf("container directive '%s' has no profile key", workflow.Spec.DWDirectives[index]) } - profile := &nnfv1alpha1.NnfContainerProfile{ + profile := &nnfv1alpha2.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: os.Getenv("NNF_CONTAINER_PROFILE_NAMESPACE"), @@ -121,7 +121,7 @@ func createPinnedContainerProfileIfNecessary(ctx context.Context, clnt client.Cl return err } - pinnedProfile := &nnfv1alpha1.NnfContainerProfile{ + pinnedProfile := &nnfv1alpha2.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), Namespace: workflow.Namespace, diff --git a/internal/controller/nnfcontainerprofile_test.go b/internal/controller/nnfcontainerprofile_test.go index 6c7583b34..d8f146390 100644 --- a/internal/controller/nnfcontainerprofile_test.go +++ b/internal/controller/nnfcontainerprofile_test.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -31,17 +31,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) // createNnfContainerProfile creates the given profile in the "default" namespace. // When expectSuccess=false, we expect to find that it was failed by the webhook. -func createNnfContainerProfile(containerProfile *nnfv1alpha1.NnfContainerProfile, expectSuccess bool) *nnfv1alpha1.NnfContainerProfile { +func createNnfContainerProfile(containerProfile *nnfv1alpha2.NnfContainerProfile, expectSuccess bool) *nnfv1alpha2.NnfContainerProfile { // Place NnfContainerProfiles in "default" for the test environment. containerProfile.ObjectMeta.Namespace = corev1.NamespaceDefault profKey := client.ObjectKeyFromObject(containerProfile) - profExpected := &nnfv1alpha1.NnfContainerProfile{} + profExpected := &nnfv1alpha2.NnfContainerProfile{} err := k8sClient.Get(context.TODO(), profKey, profExpected) Expect(err).ToNot(BeNil()) Expect(apierrors.IsNotFound(err)).To(BeTrue()) @@ -62,22 +62,22 @@ func createNnfContainerProfile(containerProfile *nnfv1alpha1.NnfContainerProfile } // basicNnfContainerProfile creates a simple NnfContainerProfile struct. 
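// Illustrative usage (profile name hypothetical; nil storages fall back to the
// three optional DW_* entries seeded in the body below):
//
//	tempProfile := basicNnfContainerProfile("sample-"+uuid.NewString()[:8], nil)
//	profile := createNnfContainerProfile(tempProfile, true)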
-func basicNnfContainerProfile(name string, storages []nnfv1alpha1.NnfContainerProfileStorage) *nnfv1alpha1.NnfContainerProfile { +func basicNnfContainerProfile(name string, storages []nnfv1alpha2.NnfContainerProfileStorage) *nnfv1alpha2.NnfContainerProfile { // default storages if not supplied, optional by default if len(storages) == 0 { - storages = []nnfv1alpha1.NnfContainerProfileStorage{ + storages = []nnfv1alpha2.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: true}, {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, {Name: "DW_GLOBAL_foo_global_lustre", Optional: true}, } } - containerProfile := &nnfv1alpha1.NnfContainerProfile{ + containerProfile := &nnfv1alpha2.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Data: nnfv1alpha1.NnfContainerProfileData{ + Data: nnfv1alpha2.NnfContainerProfileData{ Pinned: false, Storages: storages, Spec: &corev1.PodSpec{ @@ -92,7 +92,7 @@ func basicNnfContainerProfile(name string, storages []nnfv1alpha1.NnfContainerPr } // createBasicNnfContainerProfile creates a simple default container profile. -func createBasicNnfContainerProfile(storages []nnfv1alpha1.NnfContainerProfileStorage) *nnfv1alpha1.NnfContainerProfile { +func createBasicNnfContainerProfile(storages []nnfv1alpha2.NnfContainerProfileStorage) *nnfv1alpha2.NnfContainerProfile { containerProfile := basicNnfContainerProfile("sample-"+uuid.NewString()[:8], storages) return createNnfContainerProfile(containerProfile, true) } diff --git a/internal/controller/nnfdatamovementprofile_helpers.go b/internal/controller/nnfdatamovementprofile_helpers.go index f81bfc500..4bc8e96ce 100644 --- a/internal/controller/nnfdatamovementprofile_helpers.go +++ b/internal/controller/nnfdatamovementprofile_helpers.go @@ -32,14 +32,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) // findDMProfileToUse verifies a NnfDataMovementProfile named in the directive or verifies that a default can be found. -func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string]string) (*nnfv1alpha1.NnfDataMovementProfile, error) { +func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string]string) (*nnfv1alpha2.NnfDataMovementProfile, error) { var profileName string - NnfDataMovementProfile := &nnfv1alpha1.NnfDataMovementProfile{} + NnfDataMovementProfile := &nnfv1alpha2.NnfDataMovementProfile{} profileNamespace := os.Getenv("NNF_DM_PROFILE_NAMESPACE") @@ -47,7 +47,7 @@ func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string // that a default profile can be found. profileName, present := args["profile"] if present == false { - NnfDataMovementProfiles := &nnfv1alpha1.NnfDataMovementProfileList{} + NnfDataMovementProfiles := &nnfv1alpha2.NnfDataMovementProfileList{} if err := clnt.List(ctx, NnfDataMovementProfiles, &client.ListOptions{Namespace: profileNamespace}); err != nil { return nil, err } @@ -78,9 +78,9 @@ func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string } // findPinnedDMProfile finds the specified pinned profile.
-func findPinnedDMProfile(ctx context.Context, clnt client.Client, namespace string, pinnedName string) (*nnfv1alpha1.NnfDataMovementProfile, error) { +func findPinnedDMProfile(ctx context.Context, clnt client.Client, namespace string, pinnedName string) (*nnfv1alpha2.NnfDataMovementProfile, error) { - NnfDataMovementProfile := &nnfv1alpha1.NnfDataMovementProfile{} + NnfDataMovementProfile := &nnfv1alpha2.NnfDataMovementProfile{} err := clnt.Get(ctx, types.NamespacedName{Namespace: namespace, Name: pinnedName}, NnfDataMovementProfile) if err != nil { return nil, err @@ -92,7 +92,7 @@ func findPinnedDMProfile(ctx context.Context, clnt client.Client, namespace stri } // createPinnedDMProfile finds the specified profile and makes a pinned copy of it. -func createPinnedDMProfile(ctx context.Context, clnt client.Client, clntScheme *runtime.Scheme, args map[string]string, owner metav1.Object, pinnedName string) (*nnfv1alpha1.NnfDataMovementProfile, error) { +func createPinnedDMProfile(ctx context.Context, clnt client.Client, clntScheme *runtime.Scheme, args map[string]string, owner metav1.Object, pinnedName string) (*nnfv1alpha2.NnfDataMovementProfile, error) { // If we've already pinned a profile, then we're done and // we no longer have a use for the original profile. diff --git a/internal/controller/nnfdatamovementprofile_test.go b/internal/controller/nnfdatamovementprofile_test.go index f22855666..1ed831e0d 100644 --- a/internal/controller/nnfdatamovementprofile_test.go +++ b/internal/controller/nnfdatamovementprofile_test.go @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 Hewlett Packard Enterprise Development LP + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -30,17 +30,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) // createNnfDataMovementProfile creates the given profile in the "default" namespace. // When expectSuccess=false, we expect to find that it was failed by the webhook. -func createNnfDataMovementProfile(DataMovementProfile *nnfv1alpha1.NnfDataMovementProfile, expectSuccess bool) *nnfv1alpha1.NnfDataMovementProfile { +func createNnfDataMovementProfile(DataMovementProfile *nnfv1alpha2.NnfDataMovementProfile, expectSuccess bool) *nnfv1alpha2.NnfDataMovementProfile { // Place NnfDataMovementProfiles in "default" for the test environment. DataMovementProfile.ObjectMeta.Namespace = corev1.NamespaceDefault profKey := client.ObjectKeyFromObject(DataMovementProfile) - profExpected := &nnfv1alpha1.NnfDataMovementProfile{} + profExpected := &nnfv1alpha2.NnfDataMovementProfile{} err := k8sClient.Get(context.TODO(), profKey, profExpected) Expect(err).ToNot(BeNil()) Expect(apierrors.IsNotFound(err)).To(BeTrue()) @@ -61,8 +61,8 @@ func createNnfDataMovementProfile(DataMovementProfile *nnfv1alpha1.NnfDataMoveme } // basicNnfDataMovementProfile creates a simple NnfDataMovementProfile struct.
-func basicNnfDataMovementProfile(name string) *nnfv1alpha1.NnfDataMovementProfile { - DataMovementProfile := &nnfv1alpha1.NnfDataMovementProfile{ +func basicNnfDataMovementProfile(name string) *nnfv1alpha2.NnfDataMovementProfile { + DataMovementProfile := &nnfv1alpha2.NnfDataMovementProfile{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -71,14 +71,14 @@ func basicNnfDataMovementProfile(name string) *nnfv1alpha1.NnfDataMovementProfil } // createBasicDefaultNnfDataMovementProfile creates a simple default data movement profile. -func createBasicDefaultNnfDataMovementProfile() *nnfv1alpha1.NnfDataMovementProfile { +func createBasicDefaultNnfDataMovementProfile() *nnfv1alpha2.NnfDataMovementProfile { DataMovementProfile := basicNnfDataMovementProfile("durable-" + uuid.NewString()[:8]) DataMovementProfile.Data.Default = true return createNnfDataMovementProfile(DataMovementProfile, true) } // createBasicPinnedNnfDataMovementProfile creates a simple pinned data movement profile. -func createBasicPinnedNnfDataMovementProfile() *nnfv1alpha1.NnfDataMovementProfile { +func createBasicPinnedNnfDataMovementProfile() *nnfv1alpha2.NnfDataMovementProfile { DataMovementProfile := basicNnfDataMovementProfile("durable-" + uuid.NewString()[:8]) DataMovementProfile.Data.Pinned = true return createNnfDataMovementProfile(DataMovementProfile, true) diff --git a/internal/controller/nnfstorageprofile_helpers.go b/internal/controller/nnfstorageprofile_helpers.go index 8a2206f32..7cf701792 100644 --- a/internal/controller/nnfstorageprofile_helpers.go +++ b/internal/controller/nnfstorageprofile_helpers.go @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 Hewlett Packard Enterprise Development LP + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -32,14 +32,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) // findProfileToUse verifies a NnfStorageProfile named in the directive or verifies that a default can be found. -func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]string) (*nnfv1alpha1.NnfStorageProfile, error) { +func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]string) (*nnfv1alpha2.NnfStorageProfile, error) { var profileName string - nnfStorageProfile := &nnfv1alpha1.NnfStorageProfile{} + nnfStorageProfile := &nnfv1alpha2.NnfStorageProfile{} profileNamespace := os.Getenv("NNF_STORAGE_PROFILE_NAMESPACE") @@ -47,7 +47,7 @@ func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]s // that a default profile can be found. profileName, present := args["profile"] if present == false { - nnfStorageProfiles := &nnfv1alpha1.NnfStorageProfileList{} + nnfStorageProfiles := &nnfv1alpha2.NnfStorageProfileList{} if err := clnt.List(ctx, nnfStorageProfiles, &client.ListOptions{Namespace: profileNamespace}); err != nil { return nil, err } @@ -78,9 +78,9 @@ func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]s } // findPinnedProfile finds the specified pinned profile.
-func findPinnedProfile(ctx context.Context, clnt client.Client, namespace string, pinnedName string) (*nnfv1alpha1.NnfStorageProfile, error) { +func findPinnedProfile(ctx context.Context, clnt client.Client, namespace string, pinnedName string) (*nnfv1alpha2.NnfStorageProfile, error) { - nnfStorageProfile := &nnfv1alpha1.NnfStorageProfile{} + nnfStorageProfile := &nnfv1alpha2.NnfStorageProfile{} err := clnt.Get(ctx, types.NamespacedName{Namespace: namespace, Name: pinnedName}, nnfStorageProfile) if err != nil { return nil, err @@ -92,7 +92,7 @@ func findPinnedProfile(ctx context.Context, clnt client.Client, namespace string } // createPinnedProfile finds the specified profile and makes a pinned copy of it. -func createPinnedProfile(ctx context.Context, clnt client.Client, clntScheme *runtime.Scheme, args map[string]string, owner metav1.Object, pinnedName string) (*nnfv1alpha1.NnfStorageProfile, error) { +func createPinnedProfile(ctx context.Context, clnt client.Client, clntScheme *runtime.Scheme, args map[string]string, owner metav1.Object, pinnedName string) (*nnfv1alpha2.NnfStorageProfile, error) { // If we've already pinned a profile, then we're done and // we no longer have a use for the original profile. @@ -134,32 +134,32 @@ func createPinnedProfile(ctx context.Context, clnt client.Client, clntScheme *ru // addPinnedStorageProfileLabel adds name/namespace labels to a resource to indicate // which pinned storage profile is being used with that resource. -func addPinnedStorageProfileLabel(object metav1.Object, nnfStorageProfile *nnfv1alpha1.NnfStorageProfile) { +func addPinnedStorageProfileLabel(object metav1.Object, nnfStorageProfile *nnfv1alpha2.NnfStorageProfile) { labels := object.GetLabels() if labels == nil { labels = make(map[string]string) } - labels[nnfv1alpha1.PinnedStorageProfileLabelName] = nnfStorageProfile.GetName() - labels[nnfv1alpha1.PinnedStorageProfileLabelNameSpace] = nnfStorageProfile.GetNamespace() + labels[nnfv1alpha2.PinnedStorageProfileLabelName] = nnfStorageProfile.GetName() + labels[nnfv1alpha2.PinnedStorageProfileLabelNameSpace] = nnfStorageProfile.GetNamespace() object.SetLabels(labels) } // getPinnedStorageProfileFromLabel finds the pinned storage profile via the labels on the // specified resource. 
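// Illustration (values hypothetical): a resource pinned by
// addPinnedStorageProfileLabel() above carries both labels, e.g.
//
//	labels[nnfv1alpha2.PinnedStorageProfileLabelName]      // "my-profile"
//	labels[nnfv1alpha2.PinnedStorageProfileLabelNameSpace] // "default"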
-func getPinnedStorageProfileFromLabel(ctx context.Context, clnt client.Client, object metav1.Object) (*nnfv1alpha1.NnfStorageProfile, error) { +func getPinnedStorageProfileFromLabel(ctx context.Context, clnt client.Client, object metav1.Object) (*nnfv1alpha2.NnfStorageProfile, error) { labels := object.GetLabels() if labels == nil { return nil, dwsv1alpha2.NewResourceError("unable to find labels").WithFatal() } - pinnedName, okName := labels[nnfv1alpha1.PinnedStorageProfileLabelName] + pinnedName, okName := labels[nnfv1alpha2.PinnedStorageProfileLabelName] if !okName { - return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha1.PinnedStorageProfileLabelName).WithFatal() + return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha2.PinnedStorageProfileLabelName).WithFatal() } - pinnedNamespace, okNamespace := labels[nnfv1alpha1.PinnedStorageProfileLabelNameSpace] + pinnedNamespace, okNamespace := labels[nnfv1alpha2.PinnedStorageProfileLabelNameSpace] if !okNamespace { - return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha1.PinnedStorageProfileLabelNameSpace).WithFatal() + return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha2.PinnedStorageProfileLabelNameSpace).WithFatal() } return findPinnedProfile(ctx, clnt, pinnedNamespace, pinnedName) diff --git a/internal/controller/nnfstorageprofile_test.go b/internal/controller/nnfstorageprofile_test.go index c1f710945..195c04b7b 100644 --- a/internal/controller/nnfstorageprofile_test.go +++ b/internal/controller/nnfstorageprofile_test.go @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 Hewlett Packard Enterprise Development LP + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -30,17 +30,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) // createNnfStorageProfile creates the given profile in the "default" namespace. // When expectSuccess=false, we expect to find that it was failed by the webhook. -func createNnfStorageProfile(storageProfile *nnfv1alpha1.NnfStorageProfile, expectSuccess bool) *nnfv1alpha1.NnfStorageProfile { +func createNnfStorageProfile(storageProfile *nnfv1alpha2.NnfStorageProfile, expectSuccess bool) *nnfv1alpha2.NnfStorageProfile { // Place NnfStorageProfiles in "default" for the test environment. storageProfile.ObjectMeta.Namespace = corev1.NamespaceDefault profKey := client.ObjectKeyFromObject(storageProfile) - profExpected := &nnfv1alpha1.NnfStorageProfile{} + profExpected := &nnfv1alpha2.NnfStorageProfile{} err := k8sClient.Get(context.TODO(), profKey, profExpected) Expect(err).ToNot(BeNil()) Expect(apierrors.IsNotFound(err)).To(BeTrue()) @@ -61,8 +61,8 @@ func createNnfStorageProfile(storageProfile *nnfv1alpha1.NnfStorageProfile, expe } // basicNnfStorageProfile creates a simple NnfStorageProfile struct. 
-func basicNnfStorageProfile(name string) *nnfv1alpha1.NnfStorageProfile { - storageProfile := &nnfv1alpha1.NnfStorageProfile{ +func basicNnfStorageProfile(name string) *nnfv1alpha2.NnfStorageProfile { + storageProfile := &nnfv1alpha2.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -71,14 +71,14 @@ func basicNnfStorageProfile(name string) *nnfv1alpha1.NnfStorageProfile { } // createBasicDefaultNnfStorageProfile creates a simple default storage profile. -func createBasicDefaultNnfStorageProfile() *nnfv1alpha1.NnfStorageProfile { +func createBasicDefaultNnfStorageProfile() *nnfv1alpha2.NnfStorageProfile { storageProfile := basicNnfStorageProfile("durable-" + uuid.NewString()[:8]) storageProfile.Data.Default = true return createNnfStorageProfile(storageProfile, true) } // createBasicPinnedNnfStorageProfile creates a simple pinned storage profile. -func createBasicPinnedNnfStorageProfile() *nnfv1alpha1.NnfStorageProfile { +func createBasicPinnedNnfStorageProfile() *nnfv1alpha2.NnfStorageProfile { storageProfile := basicNnfStorageProfile("durable-" + uuid.NewString()[:8]) storageProfile.Data.Pinned = true return createNnfStorageProfile(storageProfile, true) diff --git a/internal/controller/nnfsystemstorage_controller.go b/internal/controller/nnfsystemstorage_controller.go index 3f74e0f88..832bcf9ed 100644 --- a/internal/controller/nnfsystemstorage_controller.go +++ b/internal/controller/nnfsystemstorage_controller.go @@ -30,13 +30,16 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -68,7 +71,7 @@ func (r *NnfSystemStorageReconciler) Reconcile(ctx context.Context, req ctrl.Req metrics.NnfSystemStorageReconcilesTotal.Inc() - nnfSystemStorage := &nnfv1alpha1.NnfSystemStorage{} + nnfSystemStorage := &nnfv1alpha2.NnfSystemStorage{} if err := r.Get(ctx, req.NamespacedName, nnfSystemStorage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -76,7 +79,7 @@ func (r *NnfSystemStorageReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfSystemStorageStatus](nnfSystemStorage) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha2.NnfSystemStorageStatus](nnfSystemStorage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { nnfSystemStorage.Status.SetResourceErrorAndLog(err, log) }() @@ -161,7 +164,7 @@ func (r *NnfSystemStorageReconciler) Reconcile(ctx context.Context, req ctrl.Req // Get the SystemConfiguration. If a SystemConfiguration is specified in the NnfSystemStorage, use that. // Otherwise, use the default/default SystemConfiguration.
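// Sketch of the fallback (not the literal body below): when
// Spec.SystemConfiguration is the empty corev1.ObjectReference{}, the lookup
// is assumed to resolve
//
//	types.NamespacedName{Name: "default", Namespace: "default"}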
-func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, nnfSystemStorage *nnfv1alpha1.NnfSystemStorage) (*dwsv1alpha2.SystemConfiguration, error) { +func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, nnfSystemStorage *nnfv1alpha2.NnfSystemStorage) (*dwsv1alpha2.SystemConfiguration, error) { systemConfiguration := &dwsv1alpha2.SystemConfiguration{} if nnfSystemStorage.Spec.SystemConfiguration != (corev1.ObjectReference{}) { @@ -189,16 +192,16 @@ func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, // Get the StorageProfile specified in the spec. We don't look for the default profile; a profile must be // specified in the NnfSystemStorage spec, and it must be marked as pinned. -func (r *NnfSystemStorageReconciler) getStorageProfile(ctx context.Context, nnfSystemStorage *nnfv1alpha1.NnfSystemStorage) (*nnfv1alpha1.NnfStorageProfile, error) { +func (r *NnfSystemStorageReconciler) getStorageProfile(ctx context.Context, nnfSystemStorage *nnfv1alpha2.NnfSystemStorage) (*nnfv1alpha2.NnfStorageProfile, error) { if nnfSystemStorage.Spec.StorageProfile == (corev1.ObjectReference{}) { return nil, dwsv1alpha2.NewResourceError("StorageProfile must be specified").WithFatal() } - if nnfSystemStorage.Spec.StorageProfile.Kind != reflect.TypeOf(nnfv1alpha1.NnfStorageProfile{}).Name() { - return nil, dwsv1alpha2.NewResourceError("StorageProfile is not of kind '%s'", reflect.TypeOf(nnfv1alpha1.NnfStorageProfile{}).Name()).WithFatal() + if nnfSystemStorage.Spec.StorageProfile.Kind != reflect.TypeOf(nnfv1alpha2.NnfStorageProfile{}).Name() { + return nil, dwsv1alpha2.NewResourceError("StorageProfile is not of kind '%s'", reflect.TypeOf(nnfv1alpha2.NnfStorageProfile{}).Name()).WithFatal() } - storageProfile := &nnfv1alpha1.NnfStorageProfile{ + storageProfile := &nnfv1alpha2.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.Spec.StorageProfile.Name, Namespace: nnfSystemStorage.Spec.StorageProfile.Namespace, @@ -215,7 +218,7 @@ func (r *NnfSystemStorageReconciler) getStorageProfile(ctx context.Context, nnfS // Create a Servers resource with one allocation on each Rabbit. If the IncludeRabbits array is not // empty, only use those Rabbits. Otherwise, use all the Rabbits in the SystemConfiguration resource except // those specified in the ExcludeRabbits array. -func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSystemStorage *nnfv1alpha1.NnfSystemStorage) error { +func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSystemStorage *nnfv1alpha2.NnfSystemStorage) error { log := r.Log.WithValues("NnfSystemStorage", client.ObjectKeyFromObject(nnfSystemStorage)) // Create a list of Rabbits to use @@ -254,6 +257,40 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste } } + // Look at the Storages resources and determine whether each of the Rabbits is enabled. If any are not, + // remove them from the list.
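// Note that, as written, the filter below also skips a Rabbit whose Storage
// resource has no labels or does not carry the "Rabbit" value for
// dwsv1alpha2.StorageTypeLabel, in addition to Rabbits that are disabled or
// not yet Ready.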
+ if nnfSystemStorage.Spec.ExcludeDisabledRabbits { + tempRabbitList := rabbitList[:0] + for _, rabbit := range rabbitList { + storage := &dwsv1alpha2.Storage{ + ObjectMeta: metav1.ObjectMeta{ + Name: rabbit, + Namespace: corev1.NamespaceDefault, + }, + } + + if err := r.Get(ctx, client.ObjectKeyFromObject(storage), storage); err != nil { + return dwsv1alpha2.NewResourceError("could not get Storage '%v'", client.ObjectKeyFromObject(storage)).WithError(err) + } + + labels := storage.GetLabels() + if labels == nil { + continue + } + + if storageType := labels[dwsv1alpha2.StorageTypeLabel]; storageType != "Rabbit" { + continue + } + + if storage.Spec.State == dwsv1alpha2.DisabledState || storage.Status.Status != dwsv1alpha2.ReadyStatus { + continue + } + + tempRabbitList = append(tempRabbitList, rabbit) + } + rabbitList = tempRabbitList + } + // Use the Rabbit list to fill in the servers resource with one allocation per Rabbit servers := &dwsv1alpha2.Servers{ ObjectMeta: metav1.ObjectMeta{ @@ -301,7 +338,7 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste // in the servers resource and exclude any computes listed in ExcludeComputes. Additionally, the ComputesTarget field determines // which of the Rabbits computes to include: all, even, odd, or a custom list. This is done using the index of the compute node // in the SystemConfiguration. -func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSystemStorage *nnfv1alpha1.NnfSystemStorage) error { +func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSystemStorage *nnfv1alpha2.NnfSystemStorage) error { log := r.Log.WithValues("NnfSystemStorage", client.ObjectKeyFromObject(nnfSystemStorage)) // Get a list of compute nodes to use @@ -338,13 +375,13 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst // Make a list of compute node index values based on the ComputesTarget field var indexList []int switch nnfSystemStorage.Spec.ComputesTarget { - case nnfv1alpha1.ComputesTargetAll: + case nnfv1alpha2.ComputesTargetAll: indexList = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - case nnfv1alpha1.ComputesTargetEven: + case nnfv1alpha2.ComputesTargetEven: indexList = []int{0, 2, 4, 6, 8, 10, 12, 14} - case nnfv1alpha1.ComputesTargetOdd: + case nnfv1alpha2.ComputesTargetOdd: indexList = []int{1, 3, 5, 7, 9, 11, 13, 15} - case nnfv1alpha1.ComputesTargetPattern: + case nnfv1alpha2.ComputesTargetPattern: indexList = append([]int(nil), nnfSystemStorage.Spec.ComputesPattern...) 
default: return dwsv1alpha2.NewResourceError("undexpected ComputesTarget type '%s'", nnfSystemStorage.Spec.ComputesTarget).WithFatal() @@ -423,7 +460,7 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst } // Create a NnfStorage resource using the list of Rabbits in the Servers resource -func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSystemStorage *nnfv1alpha1.NnfSystemStorage) error { +func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSystemStorage *nnfv1alpha2.NnfSystemStorage) error { log := r.Log.WithValues("NnfSystemStorage", client.ObjectKeyFromObject(nnfSystemStorage)) storageProfile, err := r.getStorageProfile(ctx, nnfSystemStorage) @@ -442,7 +479,7 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy return dwsv1alpha2.NewResourceError("could not get Servers: %v", client.ObjectKeyFromObject(servers)).WithError(err) } - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -460,11 +497,11 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy nnfStorage.Spec.GroupID = 0 // Need to remove all of the AllocationSets in the NnfStorage object before we begin - nnfStorage.Spec.AllocationSets = []nnfv1alpha1.NnfStorageAllocationSetSpec{} + nnfStorage.Spec.AllocationSets = []nnfv1alpha2.NnfStorageAllocationSetSpec{} // Iterate the Servers data elements to pull out the allocation sets for the server for i := range servers.Spec.AllocationSets { - nnfAllocationSet := nnfv1alpha1.NnfStorageAllocationSetSpec{} + nnfAllocationSet := nnfv1alpha2.NnfStorageAllocationSetSpec{} nnfAllocationSet.Name = servers.Spec.AllocationSets[i].Label nnfAllocationSet.Capacity = servers.Spec.AllocationSets[i].AllocationSize @@ -472,7 +509,7 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy // Create Nodes for this allocation set. for _, storage := range servers.Spec.AllocationSets[i].Storage { - node := nnfv1alpha1.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount} + node := nnfv1alpha2.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount} nnfAllocationSet.Nodes = append(nnfAllocationSet.Nodes, node) } @@ -498,9 +535,9 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy } // Wait until the NnfStorage has completed. Any errors will bubble up to the NnfSystemStorage -func (r *NnfSystemStorageReconciler) waitForNnfStorage(ctx context.Context, nnfSystemStorage *nnfv1alpha1.NnfSystemStorage) (bool, error) { +func (r *NnfSystemStorageReconciler) waitForNnfStorage(ctx context.Context, nnfSystemStorage *nnfv1alpha2.NnfSystemStorage) (bool, error) { // Check whether the NnfStorage has finished - nnfStorage := &nnfv1alpha1.NnfStorage{ + nnfStorage := &nnfv1alpha2.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -529,7 +566,7 @@ func (r *NnfSystemStorageReconciler) waitForNnfStorage(ctx context.Context, nnfS // Create an NnfAccess using the Computes resource we created earlier. This NnfAccess may or may not create any ClientMount // resources depending on if MakeClientMounts was specified in the NnfSystemStorage spec. The NnfAccess target is "shared", // meaning that multiple compute nodes will access the same storage. 
-func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSystemStorage *nnfv1alpha1.NnfSystemStorage) error { +func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSystemStorage *nnfv1alpha2.NnfSystemStorage) error { log := r.Log.WithValues("NnfSystemStorage", client.ObjectKeyFromObject(nnfSystemStorage)) storageProfile, err := r.getStorageProfile(ctx, nnfSystemStorage) @@ -537,7 +574,7 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys return err } - nnfAccess := &nnfv1alpha1.NnfAccess{ + nnfAccess := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -567,7 +604,7 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys nnfAccess.Spec.StorageReference = corev1.ObjectReference{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorage{}).Name(), } return ctrl.SetControllerReference(nnfSystemStorage, nnfAccess, r.Scheme) @@ -588,8 +625,8 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys } // Wait for the NnfAccess to be ready. Any errors are bubbled up to the NnfSystemStorage -func (r *NnfSystemStorageReconciler) waitForNnfAccess(ctx context.Context, nnfSystemStorage *nnfv1alpha1.NnfSystemStorage) (bool, error) { - nnfAccess := &nnfv1alpha1.NnfAccess{ +func (r *NnfSystemStorageReconciler) waitForNnfAccess(ctx context.Context, nnfSystemStorage *nnfv1alpha2.NnfSystemStorage) (bool, error) { + nnfAccess := &nnfv1alpha2.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -615,18 +652,41 @@ func (r *NnfSystemStorageReconciler) waitForNnfAccess(ctx context.Context, nnfSy return false, nil } +// NnfSystemStorageEnqueueAll enqueues all of the NnfSystemStorage resources after a watch is triggered +func (r *NnfSystemStorageReconciler) NnfSystemStorageEnqueueAll(ctx context.Context, o client.Object) []reconcile.Request { + log := r.Log.WithValues("NnfSystemStorage", "Enqueue All") + + requests := []reconcile.Request{} + + // Find all the NnfSystemStorage resources and add them to the Request list + nnfSystemStorageList := &nnfv1alpha2.NnfSystemStorageList{} + if err := r.List(context.TODO(), nnfSystemStorageList, []client.ListOption{}...); err != nil { + log.Info("Could not list NnfSystemStorage", "error", err) + return requests + } + + for _, nnfSystemStorage := range nnfSystemStorageList.Items { + requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace()}}) + } + + return requests +} + // SetupWithManager sets up the controller with the Manager. func (r *NnfSystemStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ - &nnfv1alpha1.NnfAccessList{}, - &nnfv1alpha1.NnfStorageList{}, + &nnfv1alpha2.NnfAccessList{}, + &nnfv1alpha2.NnfStorageList{}, &dwsv1alpha2.ComputesList{}, &dwsv1alpha2.ServersList{}, } return ctrl.NewControllerManagedBy(mgr). - For(&nnfv1alpha1.NnfSystemStorage{}). - Owns(&nnfv1alpha1.NnfAccess{}). - Owns(&nnfv1alpha1.NnfStorage{}). + For(&nnfv1alpha2.NnfSystemStorage{}). + Owns(&dwsv1alpha2.Computes{}). + Owns(&dwsv1alpha2.Servers{}). + Owns(&nnfv1alpha2.NnfStorage{}). + Owns(&nnfv1alpha2.NnfAccess{}). 
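+		// Watch the DWS Storage resources: disabling or re-enabling a Rabbit must
+		// re-run every NnfSystemStorage reconcile; see NnfSystemStorageEnqueueAll above.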
+ Watches(&dwsv1alpha2.Storage{}, handler.EnqueueRequestsFromMapFunc(r.NnfSystemStorageEnqueueAll)). Complete(r) } diff --git a/internal/controller/nnfsystemstorage_controller_test.go b/internal/controller/nnfsystemstorage_controller_test.go index 856cf44c7..153da5a85 100644 --- a/internal/controller/nnfsystemstorage_controller_test.go +++ b/internal/controller/nnfsystemstorage_controller_test.go @@ -33,8 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" ) var _ = Describe("NnfSystemStorage Controller Test", func() { @@ -43,12 +42,12 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { "rabbit-systemstorage-node-1", "rabbit-systemstorage-node-2"} - nnfNodes := [2]*nnfv1alpha1.NnfNode{} + nnfNodes := [2]*nnfv1alpha2.NnfNode{} storages := [2]*dwsv1alpha2.Storage{} nodes := [2]*corev1.Node{} var systemConfiguration *dwsv1alpha2.SystemConfiguration - var storageProfile *nnfv1alpha1.NnfStorageProfile + var storageProfile *nnfv1alpha2.NnfStorageProfile var setup sync.Once BeforeEach(func() { @@ -65,7 +64,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - v1alpha1.RabbitNodeSelectorLabel: "true", + nnfv1alpha2.RabbitNodeSelectorLabel: "true", }, }, Status: corev1.NodeStatus{ @@ -80,14 +79,14 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Expect(k8sClient.Create(context.TODO(), nodes[i])).To(Succeed()) - nnfNodes[i] = &nnfv1alpha1.NnfNode{ + nnfNodes[i] = &nnfv1alpha2.NnfNode{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: nodeName, }, - Spec: nnfv1alpha1.NnfNodeSpec{ - State: nnfv1alpha1.ResourceEnable, + Spec: nnfv1alpha2.NnfNodeSpec{ + State: nnfv1alpha2.ResourceEnable, }, } Expect(k8sClient.Create(context.TODO(), nnfNodes[i])).To(Succeed()) @@ -268,7 +267,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha1.NnfStorageProfile{} + profExpected := &nnfv1alpha2.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) @@ -281,7 +280,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { }).ShouldNot(Succeed()) Expect(k8sClient.Delete(context.TODO(), nnfNodes[i])).To(Succeed()) - tempNnfNode := &nnfv1alpha1.NnfNode{} + tempNnfNode := &nnfv1alpha2.NnfNode{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNodes[i]), tempNnfNode) }).ShouldNot(Succeed()) @@ -302,20 +301,20 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Describe("Create NnfSystemStorage", func() { It("Creates basic system storage", func() { - nnfSystemStorage := &nnfv1alpha1.NnfSystemStorage{ + nnfSystemStorage := &nnfv1alpha2.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-system-storage", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha1.NnfSystemStorageSpec{ + Spec: nnfv1alpha2.NnfSystemStorageSpec{ Type: "raw", - ComputesTarget: nnfv1alpha1.ComputesTargetAll, + ComputesTarget: nnfv1alpha2.ComputesTargetAll, MakeClientMounts: false, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), Namespace: storageProfile.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorageProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorageProfile{}).Name(), }, }, } @@ -361,20 +360,20 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { }) It("Creates even system storage", func() { - nnfSystemStorage := &nnfv1alpha1.NnfSystemStorage{ + nnfSystemStorage := &nnfv1alpha2.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-system-storage", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha1.NnfSystemStorageSpec{ + Spec: nnfv1alpha2.NnfSystemStorageSpec{ Type: "raw", - ComputesTarget: nnfv1alpha1.ComputesTargetEven, + ComputesTarget: nnfv1alpha2.ComputesTargetEven, MakeClientMounts: false, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), Namespace: storageProfile.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorageProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorageProfile{}).Name(), }, }, } @@ -420,21 +419,21 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { }) It("Creates system storage with index map", func() { - nnfSystemStorage := &nnfv1alpha1.NnfSystemStorage{ + nnfSystemStorage := &nnfv1alpha2.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-system-storage", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha1.NnfSystemStorageSpec{ + Spec: nnfv1alpha2.NnfSystemStorageSpec{ Type: "raw", - ComputesTarget: nnfv1alpha1.ComputesTargetPattern, + ComputesTarget: nnfv1alpha2.ComputesTargetPattern, ComputesPattern: []int{0, 1, 2, 3, 4}, MakeClientMounts: false, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), Namespace: storageProfile.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorageProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorageProfile{}).Name(), }, }, } @@ -480,14 +479,14 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { }) It("Creates system storage with excluded Rabbits and computes", func() { - nnfSystemStorage := &nnfv1alpha1.NnfSystemStorage{ + nnfSystemStorage := &nnfv1alpha2.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-system-storage", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha1.NnfSystemStorageSpec{ + Spec: nnfv1alpha2.NnfSystemStorageSpec{ Type: "raw", - ComputesTarget: nnfv1alpha1.ComputesTargetAll, + ComputesTarget: nnfv1alpha2.ComputesTargetAll, ExcludeRabbits: []string{nodeNames[0]}, ExcludeComputes: []string{"1-4", "1-5", "1-6"}, MakeClientMounts: false, @@ -495,7 +494,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { StorageProfile: corev1.ObjectReference{ Name: 
storageProfile.GetName(), Namespace: storageProfile.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha1.NnfStorageProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha2.NnfStorageProfile{}).Name(), }, }, } diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index e3a396855..2f14d4c9f 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -24,6 +24,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "testing" "go.uber.org/zap" @@ -37,6 +38,7 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/log" zapcr "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -49,10 +51,12 @@ import ( nnf "github.com/NearNodeFlash/nnf-ec/pkg" nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" _ "github.com/DataWorkflowServices/dws/config/crd/bases" _ "github.com/DataWorkflowServices/dws/config/webhook" _ "github.com/NearNodeFlash/lustre-fs-operator/config/crd/bases" + _ "github.com/NearNodeFlash/lustre-fs-operator/config/webhook" dwsctrls "github.com/DataWorkflowServices/dws/controllers" //+kubebuilder:scaffold:imports @@ -123,13 +127,42 @@ var _ = BeforeSuite(func() { ctx, cancel = context.WithCancel(context.TODO()) By("bootstrapping test environment") + var err error - webhookPaths := []string{ + // See https://github.com/kubernetes-sigs/controller-runtime/issues/1882 + // about getting the conversion webhook to register properly. + // Begin by relocating the code that builds the scheme, so it happens + // before calling envtest.Start(). + // Then add the scheme to envtest.CRDInstallOptions. + + err = dwsv1alpha2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = lusv1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = nnfv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = nnfv1alpha2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + webhookPaths := []string{} + webhookPathsAlt := []string{ + filepath.Join("..", "..", "config", "webhook"), filepath.Join("..", "..", "vendor", "github.com", "DataWorkflowServices", "dws", "config", "webhook"), - filepath.Join("..", "..", "config", "dws"), + filepath.Join("..", "..", "vendor", "github.com", "NearNodeFlash", "lustre-fs-operator", "config", "webhook"), } - if env, found := os.LookupEnv("WEBHOOK_DIR"); found { - webhookPaths = append(webhookPaths, env) + // Envtest doesn't run kustomize, so the basic configs don't have the + // `namePrefix` and they will all collide on the same name. The WEBHOOK_DIRS + // variable points to pre-processed webhook configs that have had the + // namePrefix added to them. + if env, found := os.LookupEnv("WEBHOOK_DIRS"); found { + webhookPaths = append(webhookPaths, strings.Split(env, ":")...) + } else { + webhookPaths = append(webhookPaths, webhookPathsAlt...) } testEnv = &envtest.Environment{ @@ -140,27 +173,18 @@ var _ = BeforeSuite(func() { filepath.Join("..", "..", "vendor", "github.com", "DataWorkflowServices", "dws", "config", "crd", "bases"), filepath.Join("..", "..", "vendor", "github.com", "NearNodeFlash", "lustre-fs-operator", "config", "crd", "bases"), }, + CRDInstallOptions: envtest.CRDInstallOptions{ + // This adds the conversion webhook configuration to + // the CRDs. 
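+				// envtest inspects this scheme for convertible types and points
+				// each matching CRD's spec.conversion at the local test webhook server.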
+ Scheme: scheme.Scheme, + }, //AttachControlPlaneOutput: true, } - cfg, err := testEnv.Start() + cfg, err = testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - err = nnfv1alpha1.AddToScheme(testEnv.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = dwsv1alpha2.AddToScheme(testEnv.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = lusv1beta1.AddToScheme(testEnv.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = nnfv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - //+kubebuilder:scaffold:scheme - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) @@ -177,6 +201,60 @@ var _ = BeforeSuite(func() { }) Expect(err).NotTo(HaveOccurred()) + /* + Start webhooks + */ + + err = (&dwsv1alpha2.Workflow{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&lusv1beta1.LustreFileSystem{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfStorageProfile{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfContainerProfile{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfDataMovementProfile{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfAccess{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfDataMovement{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfDataMovementManager{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfLustreMGT{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfNode{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfNodeBlockStorage{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfNodeECData{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfNodeStorage{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfPortManager{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfStorage{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + err = (&nnfv1alpha2.NnfSystemStorage{}).SetupWebhookWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + // +crdbumper:scaffold:builder + /* Start DWS pieces */ @@ -195,9 +273,12 @@ var _ = BeforeSuite(func() { }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&dwsv1alpha2.Workflow{}).SetupWebhookWithManager(k8sManager) + err = (&dwsctrls.SystemConfigurationReconciler{ + Client: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("SystemConfiguration"), + Scheme: testEnv.Scheme, + }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - /* Start NNF-SOS SLC pieces */ @@ -271,15 +352,6 @@ var _ = BeforeSuite(func() { }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha1.NnfStorageProfile{}).SetupWebhookWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - - err = (&nnfv1alpha1.NnfContainerProfile{}).SetupWebhookWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - - err = 
(&nnfv1alpha1.NnfDataMovementProfile{}).SetupWebhookWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - /* Start NNF-SOS NLC pieces */ @@ -292,8 +364,6 @@ var _ = BeforeSuite(func() { }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - // +crdbumper:scaffold:builder - // Coordinate the startup of the NLC controllers that use EC. semNnfNodeDone := make(chan struct{}) @@ -301,7 +371,9 @@ var _ = BeforeSuite(func() { Client: k8sManager.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("NnfNode"), Scheme: testEnv.Scheme, + Events: make(chan event.GenericEvent), SemaphoreForDone: semNnfNodeDone, + Options: &nnf.Options{}, }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) @@ -321,8 +393,10 @@ var _ = BeforeSuite(func() { Client: k8sManager.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("NnfNodeBlockStorage"), Scheme: testEnv.Scheme, + Events: make(chan event.GenericEvent), SemaphoreForStart: semNnfNodeECDone, SemaphoreForDone: semNnfNodeBlockStorageDone, + Options: &nnf.Options{}, }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/mount-daemon/Dockerfile.rpmbuild b/mount-daemon/Dockerfile.rpmbuild index 3a153e245..9729ddf4a 100644 --- a/mount-daemon/Dockerfile.rpmbuild +++ b/mount-daemon/Dockerfile.rpmbuild @@ -35,6 +35,7 @@ COPY api/ api/ COPY internal/ internal/ COPY pkg/ pkg/ COPY vendor/ vendor/ +COPY github/ github/ # Build # the GOARCH has a default value to allow the binary be built according to the host where the command diff --git a/mount-daemon/main.go b/mount-daemon/main.go index b0e9a8d9f..630916fa9 100644 --- a/mount-daemon/main.go +++ b/mount-daemon/main.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" controllers "github.com/NearNodeFlash/nnf-sos/internal/controller" "github.com/NearNodeFlash/nnf-sos/mount-daemon/version" //+kubebuilder:scaffold:imports @@ -69,7 +69,7 @@ type Service struct { func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(dwsv1alpha2.AddToScheme(scheme)) - utilruntime.Must(nnfv1alpha1.AddToScheme(scheme)) + utilruntime.Must(nnfv1alpha2.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff --git a/test-tools.sh b/test-tools.sh deleted file mode 100644 index 557372313..000000000 --- a/test-tools.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -function prefix_webhook_names { - SOURCE_DIR=$1 - DEST_DIR=$2 - - mkdir -p $DEST_DIR - cp $SOURCE_DIR/manifests.yaml $DEST_DIR - sed -i.bak -e 's/validating-webhook-configuration/nnf-validating-webhook-configuration/' $DEST_DIR/manifests.yaml - rm $DEST_DIR/manifests.yaml.bak - - cp $SOURCE_DIR/service.yaml $DEST_DIR - sed -i.bak -e 's/webhook-service/nnf-webhook-service/' $DEST_DIR/service.yaml - rm $DEST_DIR/service.yaml.bak -} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_types.go index 69a418452..55cb448d9 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_types.go @@ -22,6 +22,7 @@ package v1alpha2 import ( "github.com/DataWorkflowServices/dws/utils/updater" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ 
-165,6 +166,16 @@ type StorageList struct { Items []Storage `json:"items"` } +func (s *StorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range s.Items { + objectList = append(objectList, &s.Items[i]) + } + + return objectList +} + func init() { SchemeBuilder.Register(&Storage{}, &StorageList{}) } diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_types.go index 3e4d29fbe..2c65e3a00 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_types.go @@ -82,6 +82,9 @@ type SystemConfigurationSpec struct { type SystemConfigurationStatus struct { // Ready indicates when the SystemConfiguration has been reconciled Ready bool `json:"ready"` + + // Error information + ResourceError `json:",inline"` } //+kubebuilder:object:root=true diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.deepcopy.go index f177a88be..97071e315 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.deepcopy.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.deepcopy.go @@ -1132,7 +1132,7 @@ func (in *SystemConfiguration) DeepCopyInto(out *SystemConfiguration) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfiguration. @@ -1250,6 +1250,7 @@ func (in *SystemConfigurationSpec) DeepCopy() *SystemConfigurationSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SystemConfigurationStatus) DeepCopyInto(out *SystemConfigurationStatus) { *out = *in + in.ResourceError.DeepCopyInto(&out.ResourceError) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfigurationStatus. diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_systemconfigurations.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_systemconfigurations.yaml index ab8d9898e..df6d0f59e 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_systemconfigurations.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_systemconfigurations.yaml @@ -240,6 +240,37 @@ spec: status: description: SystemConfigurationStatus defines the status of SystemConfiguration properties: + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. 
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object ready: description: Ready indicates when the SystemConfiguration has been reconciled diff --git a/vendor/github.com/DataWorkflowServices/dws/controllers/systemconfiguration_controller.go b/vendor/github.com/DataWorkflowServices/dws/controllers/systemconfiguration_controller.go new file mode 100644 index 000000000..177fd73f5 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/controllers/systemconfiguration_controller.go @@ -0,0 +1,164 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllers + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/internal/controller/metrics" + "github.com/DataWorkflowServices/dws/utils/updater" + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + // finalizerDWSSystemConfiguration defines the finalizer name that this controller + // uses on the SystemConfiguration resource. This prevents the SystemConfiguration resource + // from being fully deleted until this controller removes the finalizer. 
+ finalizerDWSSystemConfiguration = "dataworkflowservices.github.io/systemconfiguration" +) + +// SystemConfigurationReconciler reconciles a SystemConfiguration object +type SystemConfigurationReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ChildObjects []dwsv1alpha2.ObjectList +} + +// +kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=systemconfigurations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=systemconfigurations/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=systemconfigurations/finalizers,verbs=update +// +kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=storages,verbs=get;list;watch;create;update;patch;delete;deletecollection + +func (r *SystemConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { + log := r.Log.WithValues("SystemConfiguration", req.NamespacedName) + + metrics.DwsReconcilesTotal.Inc() + + systemConfiguration := &dwsv1alpha2.SystemConfiguration{} + if err := r.Get(ctx, req.NamespacedName, systemConfiguration); err != nil { + // ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Create a status updater that handles the call to r.Status().Update() if any of the fields + // in systemConfiguration.Status{} change + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.SystemConfigurationStatus](systemConfiguration) + defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() + defer func() { systemConfiguration.Status.SetResourceErrorAndLog(err, log) }() + + // Handle cleanup if the resource is being deleted + if !systemConfiguration.GetDeletionTimestamp().IsZero() { + if !controllerutil.ContainsFinalizer(systemConfiguration, finalizerDWSSystemConfiguration) { + return ctrl.Result{}, nil + } + + deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, systemConfiguration) + if err != nil { + return ctrl.Result{}, err + } + + if !deleteStatus.Complete() { + return ctrl.Result{}, nil + } + + controllerutil.RemoveFinalizer(systemConfiguration, finalizerDWSSystemConfiguration) + if err := r.Update(ctx, systemConfiguration); err != nil { + if !apierrors.IsConflict(err) { + return ctrl.Result{}, err + } + + return ctrl.Result{Requeue: true}, nil + } + + return ctrl.Result{}, nil + } + + // Add finalizer if it doesn't exist + if !controllerutil.ContainsFinalizer(systemConfiguration, finalizerDWSSystemConfiguration) { + controllerutil.AddFinalizer(systemConfiguration, finalizerDWSSystemConfiguration) + if err := r.Update(ctx, systemConfiguration); err != nil { + if !apierrors.IsConflict(err) { + return ctrl.Result{}, err + } + + return ctrl.Result{Requeue: true}, nil + } + + return ctrl.Result{}, nil + } + + for _, storageNode := range systemConfiguration.Spec.StorageNodes { + // Create a storage resource for each storage node listed in the system configuration + storage := &dwsv1alpha2.Storage{ + ObjectMeta: metav1.ObjectMeta{ + Name: storageNode.Name, + Namespace: corev1.NamespaceDefault, + }, + } + + result, err := ctrl.CreateOrUpdate(ctx, r.Client, storage, + func() error { + dwsv1alpha2.AddOwnerLabels(storage, systemConfiguration) + labels := storage.GetLabels() + labels[dwsv1alpha2.StorageTypeLabel] = 
storageNode.Type + storage.SetLabels(labels) + + return ctrl.SetControllerReference(systemConfiguration, storage, r.Scheme) + }) + + if err != nil { + return ctrl.Result{}, dwsv1alpha2.NewResourceError("CreateOrUpdate failed for storage: %v", client.ObjectKeyFromObject(storage)).WithError(err) + } + + if result == controllerutil.OperationResultCreated { + log.Info("Created storage", "name", storage.Name) + } else if result == controllerutil.OperationResultNone { + // no change + } else { + log.Info("Updated storage", "name", storage.Name) + } + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *SystemConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.ChildObjects = []dwsv1alpha2.ObjectList{ + &dwsv1alpha2.StorageList{}, + } + + return ctrl.NewControllerManagedBy(mgr). + For(&dwsv1alpha2.SystemConfiguration{}). + Owns(&dwsv1alpha2.Storage{}). + Complete(r) +} diff --git a/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/kustomization.yaml b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/kustomization.yaml new file mode 100644 index 000000000..9cf26134e --- /dev/null +++ b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/kustomizeconfig.yaml b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/kustomizeconfig.yaml new file mode 100644 index 000000000..492aac65a --- /dev/null +++ b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config is for teaching kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. 
+nameReference: +- kind: Service + version: v1 + fieldSpecs: +# - kind: MutatingWebhookConfiguration +# group: admissionregistration.k8s.io +# path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +#- kind: MutatingWebhookConfiguration +# group: admissionregistration.k8s.io +# path: webhooks/clientConfig/service/namespace +# create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/manifests.yaml b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/manifests.yaml new file mode 100644 index 000000000..9adfd880b --- /dev/null +++ b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/manifests.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-lus-cray-hpe-com-v1beta1-lustrefilesystem + failurePolicy: Fail + name: vlustrefilesystem.kb.io + rules: + - apiGroups: + - lus.cray.hpe.com + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - lustrefilesystems + sideEffects: None diff --git a/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/service.yaml b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/service.yaml new file mode 100644 index 000000000..3f638bd9c --- /dev/null +++ b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/service.yaml @@ -0,0 +1,13 @@ + +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/webhook.go b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/webhook.go new file mode 100644 index 000000000..25205155a --- /dev/null +++ b/vendor/github.com/NearNodeFlash/lustre-fs-operator/config/webhook/webhook.go @@ -0,0 +1,24 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package webhook + +// Empty file so other repositories can import the custom resource definitions declared in +// this folder. This is useful for testing where this repository is vendored and the CRDs +// need to be deployed into the cluster as part of envtest execution. 
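The empty webhook.go above makes config/webhook a valid Go package, so a blank import from a consuming repository is enough for `go mod vendor` to copy the whole directory, YAML manifests included, into that repository's vendor tree where envtest can load it. Below is a minimal sketch of the consuming side under that layout; the test function, its assertions, and the hard-coded path are illustrative only and not part of this change:

```go
package controller_test

import (
	"path/filepath"
	"testing"

	// Blank import for side effects only: it is what makes `go mod vendor`
	// carry config/webhook (manifests.yaml, service.yaml, kustomization.yaml)
	// into the consuming repository's vendor/ tree.
	_ "github.com/NearNodeFlash/lustre-fs-operator/config/webhook"

	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

func TestVendoredWebhookConfig(t *testing.T) {
	testEnv := &envtest.Environment{
		WebhookInstallOptions: envtest.WebhookInstallOptions{
			// Point envtest at the vendored webhook config. The suite above
			// prefers the WEBHOOK_DIRS copies, which have a namePrefix applied,
			// because envtest doesn't run kustomize and the raw configurations
			// would otherwise collide on the same name.
			Paths: []string{filepath.Join("..", "..", "vendor", "github.com",
				"NearNodeFlash", "lustre-fs-operator", "config", "webhook")},
		},
	}

	cfg, err := testEnv.Start()
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = testEnv.Stop() }()

	if cfg == nil {
		t.Fatal("expected a non-nil rest.Config from envtest")
	}
}
```

This vendored path is the same directory that suite_test.go falls back to when WEBHOOK_DIRS is unset.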
diff --git a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/controller.go b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/controller.go index a016f7bb0..ae1af5587 100644 --- a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/controller.go +++ b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/controller.go @@ -1,5 +1,5 @@ /* - * Copyright 2020, 2021, 2022 Hewlett Packard Enterprise Development LP + * Copyright 2020-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -52,12 +52,17 @@ const ( ) type Options struct { - mock bool // Enable mock interfaces for Switches, NVMe, and NNF - cli bool // Enable CLI commands instead of binary - persistence bool // Enable persistent object storage; used during crash/reboot recovery - json string // Initialize the element controller with the provided json file - direct string // Enable direct management of NVMe devices matching this regexp pattern - InitializeAndExit bool // Initialize all controllers then exit without starting the http server (mfg use) + mock bool // Enable mock interfaces for Switches, NVMe, and NNF + cli bool // Enable CLI commands instead of binary + persistence bool // Enable persistent object storage; used during crash/reboot recovery + json string // Initialize the element controller with the provided json file + direct string // Enable direct management of NVMe devices matching this regexp pattern + InitializeAndExit bool // Initialize all controllers then exit without starting the http server (mfg use) + deleteUnknownVolumes bool // Delete volumes not represented by a storage pool at the end of initialization +} + +func (o *Options) DeleteUnknownVolumes() bool { + return o.deleteUnknownVolumes } func newDefaultOptions() *Options { @@ -77,6 +82,7 @@ func BindFlags(fs *flag.FlagSet) *Options { fs.StringVar(&opts.json, "json", "", "Initialize database with provided json file") fs.StringVar(&opts.direct, "direct", opts.direct, "Enable direct management of NVMe block devices matching this regexp pattern. Implies Mock.") fs.BoolVar(&opts.InitializeAndExit, "initializeAndExit", opts.InitializeAndExit, "Initialize all hardware controllers, then exit without starting the http server. 
Useful in hardware bringup") + fs.BoolVar(&opts.deleteUnknownVolumes, "deleteUnknownVolumes", opts.deleteUnknownVolumes, "Delete volumes not represented by storage pools") nvme.BindFlags(fs) @@ -117,16 +123,16 @@ func NewController(opts *Options) *ec.Controller { persistent.StorageProvider = persistent.NewJsonFilePersistentStorageProvider(opts.json) } - return ec.NewController(Name, Port, Version, NewDefaultApiRouters(switchCtrl, nvmeCtrl, nnfCtrl)) + return ec.NewController(Name, Port, Version, NewDefaultApiRouters(switchCtrl, nvmeCtrl, nnfCtrl, opts.deleteUnknownVolumes)) } // NewDefaultApiRouters - -func NewDefaultApiRouters(switchCtrl fabric.SwitchtecControllerInterface, nvmeCtrl nvme.NvmeController, nnfCtrl nnf.NnfControllerInterface) ec.Routers { +func NewDefaultApiRouters(switchCtrl fabric.SwitchtecControllerInterface, nvmeCtrl nvme.NvmeController, nnfCtrl nnf.NnfControllerInterface, nnfUnknownVolumes bool) ec.Routers { routers := []ec.Router{ fabric.NewDefaultApiRouter(fabric.NewDefaultApiService(), switchCtrl), nvme.NewDefaultApiRouter(nvme.NewDefaultApiService(), nvmeCtrl), - nnf.NewDefaultApiRouter(nnf.NewDefaultApiService(nnf.NewDefaultStorageService()), nnfCtrl), + nnf.NewDefaultApiRouter(nnf.NewDefaultApiService(nnf.NewDefaultStorageService(nnfUnknownVolumes)), nnfCtrl), telemetry.NewDefaultApiRouter(telemetry.NewDefaultApiService()), event.NewDefaultApiRouter(event.NewDefaultApiService()), msgreg.NewDefaultApiRouter(msgreg.NewDefaultApiService()), diff --git a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/ec/ec.go b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/ec/ec.go index 4c947f720..b415dc947 100644 --- a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/ec/ec.go +++ b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/ec/ec.go @@ -1,5 +1,5 @@ /* - * Copyright 2020, 2021, 2022 Hewlett Packard Enterprise Development LP + * Copyright 2020-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -109,7 +109,7 @@ type Options struct { } func NewDefaultOptions() *Options { - return &Options{Http: true, Port: 8080, Log: false, Verbose: false} + return &Options{Http: false, Port: 8080, Log: false, Verbose: false} } func NewDefaultTestOptions() *Options { @@ -322,7 +322,7 @@ func (c *Controller) Init(opts *Options) error { // Run - Run a controller with standard behavior - that is with GRPC server and // request handling that operates by unpacking the GRPC request and -// forwardining it to the element controller's handlers. +// forwarding it to the element controller's handlers. func (c *Controller) Run() error { if c.processor == nil { return fmt.Errorf("controller processor uninitialized") diff --git a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/allocation_policy.go b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/allocation_policy.go index 11ce68590..aae9379d4 100644 --- a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/allocation_policy.go +++ b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/allocation_policy.go @@ -1,5 +1,5 @@ /* - * Copyright 2020, 2021, 2022 Hewlett Packard Enterprise Development LP + * Copyright 2020-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -210,7 +210,7 @@ func (p *SpareAllocationPolicy) Allocate(pid uuid.UUID) ([]nvme.ProvidingVolume, return volumes, fmt.Errorf("Create Volume Failure: %s", err) } - remainingCapacityBytes = remainingCapacityBytes - volume.GetCapaityBytes() + remainingCapacityBytes = remainingCapacityBytes - volume.GetCapacityBytes() volumes = append(volumes, nvme.ProvidingVolume{Storage: storage, VolumeId: volume.Id()}) } diff --git a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/manager.go b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/manager.go index 58d282ea5..cd2d78bcb 100644 --- a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/manager.go +++ b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/manager.go @@ -1,5 +1,5 @@ /* - * Copyright 2020, 2021, 2022 Hewlett Packard Enterprise Development LP + * Copyright 2020-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -55,7 +55,8 @@ var storageService = StorageService{ health: sf.CRITICAL_RH, } -func NewDefaultStorageService() StorageServiceApi { +func NewDefaultStorageService(unknownVolumes bool) StorageServiceApi { + storageService.deleteUnknownVolumes = unknownVolumes return NewAerService(&storageService) // Wrap the default storage service with Advanced Error Reporting capabilities } @@ -76,9 +77,12 @@ type StorageService struct { // Index of the Id field of any Storage Service resource (Pools, Groups, Endpoints, FileSystems) // That is, given a Storage Service resource OdataId field, ResourceIndex will correspond to the - // index within the OdataId splity by "/" i.e. strings.split(OdataId, "/")[ResourceIndex] + // index within the OdataId split by "/" i.e. strings.split(OdataId, "/")[ResourceIndex] resourceIndex int + // This flag controls whether we delete volumes that don't appear in storage pools we know about. + deleteUnknownVolumes bool + log ec.Logger } @@ -528,8 +532,8 @@ func (s *StorageService) EventHandler(e event.Event) error { return nil } - // Check if the fabric is ready; that is all devices are enumerated and discovery - // is complete. + // Check if the fabric is ready; + // that is all devices are enumerated and discovery is complete. if e.Is(msgreg.FabricReadyNnf("")) { log.V(1).Info("Fabric ready") @@ -539,8 +543,10 @@ func (s *StorageService) EventHandler(e event.Event) error { } // Remove any namespaces that are not part of a Storage Pool - log.V(2).Info("Cleanup obsolete volumes") - s.cleanupVolumes() + if s.deleteUnknownVolumes { + log.V(2).Info("Cleanup unknown volumes") + s.cleanupVolumes() + } s.state = sf.ENABLED_RST s.health = sf.OK_RH diff --git a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/storage_pool.go b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/storage_pool.go index f091cf678..cf68446fd 100644 --- a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/storage_pool.go +++ b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf/storage_pool.go @@ -1,5 +1,5 @@ /* - * Copyright 2020, 2021, 2022 Hewlett Packard Enterprise Development LP + * Copyright 2020-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -55,7 +55,7 @@ type AllocatedVolume struct { func (p *StoragePool) GetCapacityBytes() (capacityBytes uint64) { for _, pv := range p.providingVolumes { - capacityBytes += pv.Storage.FindVolume(pv.VolumeId).GetCapaityBytes() + capacityBytes += pv.Storage.FindVolume(pv.VolumeId).GetCapacityBytes() } return capacityBytes } diff --git a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nvme/manager.go b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nvme/manager.go index 3c91c94c1..c9cbfa517 100644 --- a/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nvme/manager.go +++ b/vendor/github.com/NearNodeFlash/nnf-ec/pkg/manager-nvme/manager.go @@ -1,5 +1,5 @@ /* - * Copyright 2020, 2021, 2022 Hewlett Packard Enterprise Development LP + * Copyright 2020-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -82,7 +82,7 @@ type Manager struct { // Command-Line Options purge bool // Purge existing namespaces on storage controllers - purgeMockDb bool // Purge the persistent mock databse + purgeMockDb bool // Purge the persistent mock database log ec.Logger } @@ -635,7 +635,7 @@ func (s *Storage) findVolume(volumeId string) *Volume { func (v *Volume) Id() string { return v.id } func (v *Volume) GetOdataId() string { return v.storage.OdataId() + "/Volumes/" + v.id } -func (v *Volume) GetCapaityBytes() uint64 { return uint64(v.capacityBytes) } +func (v *Volume) GetCapacityBytes() uint64 { return uint64(v.capacityBytes) } func (v *Volume) GetNamespaceId() nvme.NamespaceIdentifier { return v.namespaceId } func (v *Volume) GetGloballyUniqueIdentifier() nvme.NamespaceGloballyUniqueIdentifier { diff --git a/vendor/modules.txt b/vendor/modules.txt index 687c5c82c..4accb5571 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# github.com/DataWorkflowServices/dws v0.0.1-0.20240820212105-5950825b3d74 +# github.com/DataWorkflowServices/dws v0.0.1-0.20240913193141-737bcd946a02 ## explicit; go 1.21 github.com/DataWorkflowServices/dws/api/v1alpha2 github.com/DataWorkflowServices/dws/config/crd/bases @@ -11,11 +11,12 @@ github.com/DataWorkflowServices/dws/utils/updater # github.com/HewlettPackard/structex v1.0.4 ## explicit; go 1.14 github.com/HewlettPackard/structex -# github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240820214524-99d5da17471d +# github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240913195900-b3285e54755e ## explicit; go 1.21 github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1 github.com/NearNodeFlash/lustre-fs-operator/config/crd/bases -# github.com/NearNodeFlash/nnf-ec v0.0.1-0.20240820195316-cb407b151cb4 +github.com/NearNodeFlash/lustre-fs-operator/config/webhook +# github.com/NearNodeFlash/nnf-ec v0.0.1-0.20240912200758-f862bc773739 ## explicit; go 1.19 github.com/NearNodeFlash/nnf-ec/internal/switchtec/pkg/nvme github.com/NearNodeFlash/nnf-ec/internal/switchtec/pkg/switchtec