From 3eb01c92f75216ddd08f4c3da178cbb8a56ce560 Mon Sep 17 00:00:00 2001
From: mprahl
Date: Tue, 17 Oct 2023 15:11:57 -0400
Subject: [PATCH] Cache hub template queries

This leverages the new caching functionality in go-template-utils so that
API queries are not duplicated. As a side effect, this should cause fewer
reconciles because starting a watch for a hub template no longer triggers
an initial reconcile merely because the watch was created.

Relates:
https://issues.redhat.com/browse/ACM-7402
https://issues.redhat.com/browse/ACM-7398

Signed-off-by: mprahl
(cherry picked from commit df22cc22a27f95eb3a276e9697bb95849d5eb866)
---
 controllers/propagator/encryption.go          |   2 +-
 controllers/propagator/encryption_test.go     |   2 +-
 controllers/propagator/propagation.go         | 179 ++++++++----------
 .../propagator/replicatedpolicy_controller.go |  66 ++++---
 .../propagator/replicatedpolicy_setup.go      |  38 +---
 go.mod                                        |   8 +-
 go.sum                                        |  12 +-
 main.go                                       |  28 ++-
 8 files changed, 160 insertions(+), 175 deletions(-)

diff --git a/controllers/propagator/encryption.go b/controllers/propagator/encryption.go
index d3e1b808..64ba9313 100644
--- a/controllers/propagator/encryption.go
+++ b/controllers/propagator/encryption.go
@@ -10,7 +10,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/stolostron/go-template-utils/v3/pkg/templates"
+	"github.com/stolostron/go-template-utils/v4/pkg/templates"
 	corev1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/controllers/propagator/encryption_test.go b/controllers/propagator/encryption_test.go
index 31ceb86c..b5937d38 100644
--- a/controllers/propagator/encryption_test.go
+++ b/controllers/propagator/encryption_test.go
@@ -11,7 +11,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 
-	"github.com/stolostron/go-template-utils/v3/pkg/templates"
+	"github.com/stolostron/go-template-utils/v4/pkg/templates"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
diff --git a/controllers/propagator/propagation.go b/controllers/propagator/propagation.go
index aef8dae7..3d54ac95 100644
--- a/controllers/propagator/propagation.go
+++ b/controllers/propagator/propagation.go
@@ -13,18 +13,16 @@ import (
 	"sync"
 	"time"
 
-	templates "github.com/stolostron/go-template-utils/v3/pkg/templates"
+	templates "github.com/stolostron/go-template-utils/v4/pkg/templates"
 	k8sdepwatches "github.com/stolostron/kubernetes-dependency-watches/client"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/json"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/record"
-	clusterv1 "open-cluster-management.io/api/cluster/v1"
 	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
 	appsv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -36,16 +34,11 @@ import (
 )
 
 const (
-	startDelim              = "{{hub"
-	stopDelim               = "hub}}"
+	TemplateStartDelim      = "{{hub"
+	TemplateStopDelim       = "hub}}"
 	TriggerUpdateAnnotation = "policy.open-cluster-management.io/trigger-update"
 )
 
-var (
-	kubeConfig *rest.Config
-	kubeClient *kubernetes.Interface
-)
-
 type Propagator struct {
 	client.Client
 	Scheme                  *runtime.Scheme
@@ -54,24 +47,6 @@ type Propagator struct {
 	ReplicatedPolicyUpdates chan event.GenericEvent
 }
 
-func Initialize(kubeconfig *rest.Config, kubeclient *kubernetes.Interface) {
-	kubeConfig = kubeconfig
-	kubeClient = kubeclient
-}
-
-// getTemplateCfg returns the default policy template configuration.
-func getTemplateCfg() templates.Config {
-	// (Encryption settings are set during the processTemplates method)
-	// Adding eight spaces to the indentation makes the usage of `indent N` be from the logical
-	// starting point of the resource object wrapped in the ConfigurationPolicy.
-	return templates.Config{
-		AdditionalIndentation: 8,
-		DisabledFunctions:     []string{},
-		StartDelim:            startDelim,
-		StopDelim:             stopDelim,
-	}
-}
-
 // clusterDecision contains a single decision where the replicated policy
 // should be processed and any overrides to the root policy
 type clusterDecision struct {
@@ -432,7 +407,7 @@ func (r *RootPolicyReconciler) handleRootPolicy(instance *policiesv1.Policy) err
 // a helper to quickly check if there are any templates in any of the policy templates
 func policyHasTemplates(instance *policiesv1.Policy) bool {
 	for _, policyT := range instance.Spec.PolicyTemplates {
-		if templates.HasTemplate(policyT.ObjectDefinition.Raw, startDelim, false) {
+		if templates.HasTemplate(policyT.ObjectDefinition.Raw, TemplateStartDelim, false) {
 			return true
 		}
 	}
@@ -440,15 +415,42 @@ func policyHasTemplates(instance *policiesv1.Policy) bool {
 	return false
 }
 
+type templateCtx struct {
+	ManagedClusterName   string
+	ManagedClusterLabels map[string]string
+}
+
+func addManagedClusterLabels(clusterName string) func(templates.CachingQueryAPI, interface{}) (interface{}, error) {
+	return func(api templates.CachingQueryAPI, ctx interface{}) (interface{}, error) {
+		typedCtx, ok := ctx.(templateCtx)
+		if !ok {
+			return ctx, nil
+		}
+
+		managedClusterGVK := schema.GroupVersionKind{
+			Group:   "cluster.open-cluster-management.io",
+			Version: "v1",
+			Kind:    "ManagedCluster",
+		}
+
+		managedCluster, err := api.Get(managedClusterGVK, "", clusterName)
+		if err != nil {
+			return ctx, err
+		}
+
+		typedCtx.ManagedClusterLabels = managedCluster.GetLabels()
+
+		return typedCtx, nil
+	}
+}
+
 // Iterates through policy definitions and processes hub templates. A special annotation
 // policy.open-cluster-management.io/trigger-update is used to trigger reprocessing of the templates
 // and ensure that replicated-policies in the cluster are updated only if there is a change. This
 // annotation is deleted from the replicated policies and not propagated to the cluster namespaces.
 func (r *ReplicatedPolicyReconciler) processTemplates(
 	replicatedPlc *policiesv1.Policy, decision appsv1.PlacementDecision, rootPlc *policiesv1.Policy,
-) (
-	map[k8sdepwatches.ObjectIdentifier]bool, error,
-) {
+) error {
 	log := log.WithValues(
 		"policyName", rootPlc.GetName(),
 		"policyNamespace", rootPlc.GetNamespace(),
@@ -457,7 +459,6 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 	log.V(1).Info("Processing templates")
 
 	annotations := replicatedPlc.GetAnnotations()
-	templateRefObjs := map[k8sdepwatches.ObjectIdentifier]bool{}
 
 	// handle possible nil map
 	if len(annotations) == 0 {
@@ -469,7 +470,7 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 		if boolDisable, err := strconv.ParseBool(disable); err == nil && boolDisable {
 			log.Info("Detected the disable-templates annotation. Will not process templates.")
 
-			return templateRefObjs, nil
+			return nil
 		}
 	}
 
@@ -480,29 +481,38 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 		replicatedPlc.SetAnnotations(annotations)
 	}
 
-	templateCfg := getTemplateCfg()
-	templateCfg.LookupNamespace = rootPlc.GetNamespace()
-	templateCfg.ClusterScopedAllowList = []templates.ClusterScopedObjectIdentifier{{
-		Group: "cluster.open-cluster-management.io",
-		Kind:  "ManagedCluster",
-		Name:  decision.ClusterName,
-	}}
+	plcGVK := replicatedPlc.GroupVersionKind()
 
-	tmplResolver, err := templates.NewResolver(kubeClient, kubeConfig, templateCfg)
-	if err != nil {
-		log.Error(err, "Error instantiating template resolver")
-		panic(err)
+	templateResolverOptions := templates.ResolveOptions{
+		ClusterScopedAllowList: []templates.ClusterScopedObjectIdentifier{
+			{
+				Group: "cluster.open-cluster-management.io",
+				Kind:  "ManagedCluster",
+				Name:  decision.ClusterName,
+			},
+		},
+		DisableAutoCacheCleanUp: true,
+		LookupNamespace:         rootPlc.GetNamespace(),
+		Watcher: &k8sdepwatches.ObjectIdentifier{
+			Group:     plcGVK.Group,
+			Version:   plcGVK.Version,
+			Kind:      plcGVK.Kind,
+			Namespace: replicatedPlc.GetNamespace(),
+			Name:      replicatedPlc.GetName(),
+		},
 	}
 
+	var templateResult templates.TemplateResult
+
 	// A policy can have multiple policy templates within it, iterate and process each
 	for _, policyT := range replicatedPlc.Spec.PolicyTemplates {
-		if !templates.HasTemplate(policyT.ObjectDefinition.Raw, templateCfg.StartDelim, false) {
+		if !templates.HasTemplate(policyT.ObjectDefinition.Raw, TemplateStartDelim, false) {
 			continue
 		}
 
 		if !isConfigurationPolicy(policyT) {
 			// has Templates but not a configuration policy
-			err = k8serrors.NewBadRequest("Templates are restricted to only Configuration Policies")
+			err := k8serrors.NewBadRequest("Templates are restricted to only Configuration Policies")
 			log.Error(err, "Not a Configuration Policy")
 			r.Recorder.Event(rootPlc, "Warning", "PolicyPropagation",
 				fmt.Sprintf(
@@ -513,51 +523,30 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 				),
 			)
 
-			return templateRefObjs, err
+			return err
 		}
 
 		log.V(1).Info("Found an object definition with templates")
 
-		templateContext := struct {
-			ManagedClusterName   string
-			ManagedClusterLabels map[string]string
-		}{
-			ManagedClusterName: decision.ClusterName,
-		}
+		templateContext := templateCtx{ManagedClusterName: decision.ClusterName}
 
 		if strings.Contains(string(policyT.ObjectDefinition.Raw), "ManagedClusterLabels") {
-			templateRefObjs[k8sdepwatches.ObjectIdentifier{
-				Group:     "cluster.open-cluster-management.io",
-				Version:   "v1",
-				Kind:      "ManagedCluster",
-				Namespace: "",
-				Name:      decision.ClusterName,
-			}] = true
-
-			managedCluster := &clusterv1.ManagedCluster{}
-
-			err := r.Get(context.TODO(), types.NamespacedName{Name: decision.ClusterName}, managedCluster)
-			if err != nil {
-				log.Error(err, "Failed to get the ManagedCluster in order to use its labels in a hub template")
-			}
-
-			// if an error occurred, the ManagedClusterLabels will just be left empty
-			templateContext.ManagedClusterLabels = managedCluster.Labels
+			templateResolverOptions.ContextTransformers = append(
+				templateResolverOptions.ContextTransformers, addManagedClusterLabels(decision.ClusterName),
+			)
 		}
 
 		// Handle value encryption initialization
-		usesEncryption := templates.UsesEncryption(
-			policyT.ObjectDefinition.Raw, templateCfg.StartDelim, templateCfg.StopDelim,
-		)
+		usesEncryption := templates.UsesEncryption(policyT.ObjectDefinition.Raw, TemplateStartDelim, TemplateStopDelim)
 		// Initialize AES Key and initialization vector
-		if usesEncryption && !templateCfg.EncryptionEnabled {
+		if usesEncryption && !templateResolverOptions.EncryptionEnabled {
 			log.V(1).Info("Found an object definition requiring encryption. Handling encryption keys.")
 			// Get/generate the encryption key
 			encryptionKey, err := r.getEncryptionKey(decision.ClusterName)
 			if err != nil {
 				log.Error(err, "Failed to get/generate the policy encryption key")
 
-				return templateRefObjs, err
+				return err
 			}
 
 			// Get/generate the initialization vector
@@ -567,34 +556,25 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 			if err != nil {
 				log.Error(err, "Failed to get initialization vector")
 
-				return templateRefObjs, err
+				return err
 			}
 
 			// Set the initialization vector in the annotations
 			replicatedPlc.SetAnnotations(annotations)
 
 			// Set the EncryptionConfig with the retrieved key
-			templateCfg.EncryptionConfig = templates.EncryptionConfig{
+			templateResolverOptions.EncryptionConfig = templates.EncryptionConfig{
 				EncryptionEnabled:    true,
 				AESKey:               encryptionKey,
 				InitializationVector: initializationVector,
 			}
-
-			err = tmplResolver.SetEncryptionConfig(templateCfg.EncryptionConfig)
-			if err != nil {
-				log.Error(err, "Error setting encryption configuration")
-
-				return templateRefObjs, err
-			}
 		}
 
-		templateResult, tplErr := tmplResolver.ResolveTemplate(policyT.ObjectDefinition.Raw, templateContext)
+		var tplErr error
 
-		// Record the referenced objects in the template even if there is an error. This is because a change in the
-		// object could fix the error.
-		for _, refObj := range templateResult.ReferencedObjects {
-			templateRefObjs[refObj] = true
-		}
+		templateResult, tplErr = r.TemplateResolver.ResolveTemplate(
+			policyT.ObjectDefinition.Raw, templateContext, &templateResolverOptions,
+		)
 
 		if tplErr != nil {
 			log.Error(tplErr, "Failed to resolve templates")
@@ -635,7 +615,7 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 				}
 			}
 
-			return templateRefObjs, tplErr
+			return tplErr
 		}
 
 		policyT.ObjectDefinition.Raw = templateResult.ResolvedJSON
@@ -646,7 +626,7 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 
 		jsonErr := json.Unmarshal(templateResult.ResolvedJSON, policyTObjectUnstructured)
 		if jsonErr != nil {
-			return templateRefObjs, fmt.Errorf("failed to unmarshal the object definition to JSON: %w", jsonErr)
+			return fmt.Errorf("failed to unmarshal the object definition to JSON: %w", jsonErr)
 		}
 
 		policyTAnnotations := policyTObjectUnstructured.GetAnnotations()
@@ -663,7 +643,7 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 
 			updatedPolicyT, jsonErr := json.Marshal(policyTObjectUnstructured)
 			if jsonErr != nil {
-				return templateRefObjs, fmt.Errorf("failed to marshal the policy template to JSON: %w", jsonErr)
+				return fmt.Errorf("failed to marshal the policy template to JSON: %w", jsonErr)
 			}
 
 			policyT.ObjectDefinition.Raw = updatedPolicyT
@@ -671,9 +651,16 @@ func (r *ReplicatedPolicyReconciler) processTemplates(
 		}
 	}
 
+	if templateResult.CacheCleanUp != nil {
+		err := templateResult.CacheCleanUp()
+		if err != nil {
+			return err
+		}
+	}
+
 	log.V(1).Info("Successfully processed templates")
 
-	return templateRefObjs, nil
+	return nil
 }
 
 func isConfigurationPolicy(policyT *policiesv1.PolicyTemplate) bool {
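The rewritten processTemplates funnels the whole hub-template flow through a single ResolveTemplate call: ResolveOptions.Watcher tells the shared cache which object its queries belong to, and the context transformer fills in ManagedClusterLabels only when a template actually references them. A minimal sketch of that call pattern in isolation (the resolver, template bytes, and names are illustrative assumptions, not code from this patch):

// Sketch only: resolver is assumed to come from templates.NewResolverWithCaching,
// and rawTemplate is an object definition containing {{hub ... hub}} templates.
func resolveForCluster(
	resolver *templates.TemplateResolver, rawTemplate []byte, clusterName string,
) ([]byte, error) {
	opts := templates.ResolveOptions{
		LookupNamespace: "policies", // hypothetical root policy namespace
		// Queries and watches are cached under this identifier and can be
		// released later with resolver.UncacheWatcher(...).
		Watcher: &k8sdepwatches.ObjectIdentifier{
			Group:     "policy.open-cluster-management.io",
			Version:   "v1",
			Kind:      "Policy",
			Namespace: clusterName,
			Name:      "example-policy", // hypothetical replicated policy name
		},
		// Run by the resolver before resolving, giving the template context
		// access to the same query cache.
		ContextTransformers: []func(templates.CachingQueryAPI, interface{}) (interface{}, error){
			addManagedClusterLabels(clusterName),
		},
	}

	result, err := resolver.ResolveTemplate(rawTemplate, templateCtx{ManagedClusterName: clusterName}, &opts)
	if err != nil {
		// The queries made so far stay cached and watched, so a later change
		// to a referenced object can still trigger a fixing reconcile.
		return nil, err
	}

	return result.ResolvedJSON, nil
}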
"github.com/stolostron/go-template-utils/v4/pkg/templates" k8sdepwatches "github.com/stolostron/kubernetes-dependency-watches/client" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" @@ -25,6 +26,7 @@ type ReplicatedPolicyReconciler struct { Propagator ResourceVersions *sync.Map DynamicWatcher k8sdepwatches.DynamicWatcher + TemplateResolver *templates.TemplateResolver } func (r *ReplicatedPolicyReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { @@ -33,7 +35,7 @@ func (r *ReplicatedPolicyReconciler) Reconcile(ctx context.Context, request ctrl // Set the hub template watch metric after reconcile defer func() { - hubTempWatches := r.DynamicWatcher.GetWatchCount() + hubTempWatches := r.TemplateResolver.GetWatchCount() log.V(3).Info("Setting hub template watch metric", "value", hubTempWatches) hubTemplateActiveWatchesMetric.Set(float64(hubTempWatches)) @@ -207,6 +209,18 @@ func (r *ReplicatedPolicyReconciler) Reconcile(ctx context.Context, request ctrl return reconcile.Result{}, err } + instanceGVK := desiredReplicatedPolicy.GroupVersionKind() + instanceObjID := k8sdepwatches.ObjectIdentifier{ + Group: instanceGVK.Group, + Version: instanceGVK.Version, + Kind: instanceGVK.Kind, + Namespace: request.Namespace, + Name: request.Name, + } + + // save the watcherError for later, so that the policy can still be updated now. + var watcherErr error + if policyHasTemplates(rootPolicy) { if replicatedExists { // If the replicated policy has an initialization vector specified, set it for processing @@ -226,36 +240,23 @@ func (r *ReplicatedPolicyReconciler) Reconcile(ctx context.Context, request ctrl // #nosec G104 -- any errors are logged and recorded in the processTemplates method, // but the ignored status will be handled appropriately by the policy controllers on // the managed cluster(s). - templObjsToWatch, _ := r.processTemplates(desiredReplicatedPolicy, decision.Cluster, rootPolicy) - - for objID, val := range templObjsToWatch { - if val { - objsToWatch[objID] = true - } + _ = r.processTemplates(desiredReplicatedPolicy, decision.Cluster, rootPolicy) + } else { + watcherErr := r.TemplateResolver.UncacheWatcher(instanceObjID) + if watcherErr != nil { + log.Error(watcherErr, "Failed to uncache objects related to the replicated policy's templates") } } - instanceGVK := desiredReplicatedPolicy.GroupVersionKind() - instanceObjID := k8sdepwatches.ObjectIdentifier{ - Group: instanceGVK.Group, - Version: instanceGVK.Version, - Kind: instanceGVK.Kind, - Namespace: request.Namespace, - Name: request.Name, - } - refObjs := make([]k8sdepwatches.ObjectIdentifier, 0, len(objsToWatch)) - - for refObj := range objsToWatch { - refObjs = append(refObjs, refObj) - } - - // save the watcherError for later, so that the policy can still be updated now. - var watcherErr error + if len(objsToWatch) != 0 { + refObjs := make([]k8sdepwatches.ObjectIdentifier, 0, len(objsToWatch)) + for objToWatch := range objsToWatch { + refObjs = append(refObjs, objToWatch) + } - if len(refObjs) != 0 { watcherErr = r.DynamicWatcher.AddOrUpdateWatcher(instanceObjID, refObjs...) 
if watcherErr != nil { - log.Error(watcherErr, "Failed to update the dynamic watches for the hub policy templates") + log.Error(watcherErr, "Failed to update the dynamic watches for the policy set dependencies") } } else { watcherErr = r.DynamicWatcher.RemoveWatcher(instanceObjID) @@ -316,13 +317,24 @@ func (r *ReplicatedPolicyReconciler) Reconcile(ctx context.Context, request ctrl func (r *ReplicatedPolicyReconciler) cleanUpReplicated(ctx context.Context, replicatedPolicy *policiesv1.Policy) error { gvk := replicatedPolicy.GroupVersionKind() - watcherErr := r.DynamicWatcher.RemoveWatcher(k8sdepwatches.ObjectIdentifier{ + objID := k8sdepwatches.ObjectIdentifier{ Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind, Namespace: replicatedPolicy.Namespace, Name: replicatedPolicy.Name, - }) + } + + watcherErr := r.DynamicWatcher.RemoveWatcher(objID) + + uncacheErr := r.TemplateResolver.UncacheWatcher(objID) + if uncacheErr != nil { + if watcherErr == nil { + watcherErr = uncacheErr + } else { + watcherErr = fmt.Errorf("%w; %w", watcherErr, uncacheErr) + } + } rsrcVersKey := replicatedPolicy.GetNamespace() + "/" + replicatedPolicy.GetName() diff --git a/controllers/propagator/replicatedpolicy_setup.go b/controllers/propagator/replicatedpolicy_setup.go index d4778da3..1c5348de 100644 --- a/controllers/propagator/replicatedpolicy_setup.go +++ b/controllers/propagator/replicatedpolicy_setup.go @@ -1,20 +1,15 @@ package propagator import ( - "context" "sync" - "time" "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" policiesv1 "open-cluster-management.io/governance-policy-propagator/api/v1" @@ -22,7 +17,11 @@ import ( ) func (r *ReplicatedPolicyReconciler) SetupWithManager( - mgr ctrl.Manager, maxConcurrentReconciles uint, dynWatcherSrc source.Source, updateSrc source.Source, + mgr ctrl.Manager, + maxConcurrentReconciles uint, + dependenciesSource source.Source, + updateSrc source.Source, + templateSrc source.Source, ) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: int(maxConcurrentReconciles)}). @@ -30,18 +29,9 @@ func (r *ReplicatedPolicyReconciler) SetupWithManager( For( &policiesv1.Policy{}, builder.WithPredicates(replicatedPolicyPredicates(r.ResourceVersions))). - WatchesRawSource( - dynWatcherSrc, - // The dependency-watcher could create an event before the same sort of watch in the - // controller-runtime triggers an update in the cache. This tries to ensure the cache is - // updated before the reconcile is triggered. - &delayGeneric{ - EventHandler: &handler.EnqueueRequestForObject{}, - delay: time.Second * 3, - }). - WatchesRawSource( - updateSrc, - &handler.EnqueueRequestForObject{}). + WatchesRawSource(dependenciesSource, &handler.EnqueueRequestForObject{}). + WatchesRawSource(updateSrc, &handler.EnqueueRequestForObject{}). + WatchesRawSource(templateSrc, &handler.EnqueueRequestForObject{}). 
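Because the resolver caches queries and holds API watches per watcher identifier, the controller must now release that state when a replicated policy stops using hub templates or is deleted, which is what the two UncacheWatcher calls above do. A sketch of that cleanup path as a standalone helper (illustrative, not code from this patch):

// Sketch only: drop the cached queries and watches recorded for a replicated
// policy, then refresh the watch-count metric the controller maintains.
// resolver is the shared *templates.TemplateResolver.
func releaseTemplateCache(resolver *templates.TemplateResolver, objID k8sdepwatches.ObjectIdentifier) error {
	// Releases every cached query and backing API watch for this identifier;
	// the controller calls this even when nothing was cached for it.
	if err := resolver.UncacheWatcher(objID); err != nil {
		return err
	}

	// Only watches still backing other cached queries remain afterward.
	hubTemplateActiveWatchesMetric.Set(float64(resolver.GetWatchCount()))

	return nil
}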
diff --git a/controllers/propagator/replicatedpolicy_setup.go b/controllers/propagator/replicatedpolicy_setup.go
index d4778da3..1c5348de 100644
--- a/controllers/propagator/replicatedpolicy_setup.go
+++ b/controllers/propagator/replicatedpolicy_setup.go
@@ -1,20 +1,15 @@
 package propagator
 
 import (
-	"context"
 	"sync"
-	"time"
 
 	"k8s.io/apimachinery/pkg/api/equality"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/client-go/util/workqueue"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	policiesv1 "open-cluster-management.io/governance-policy-propagator/api/v1"
@@ -22,7 +17,11 @@ import (
 )
 
 func (r *ReplicatedPolicyReconciler) SetupWithManager(
-	mgr ctrl.Manager, maxConcurrentReconciles uint, dynWatcherSrc source.Source, updateSrc source.Source,
+	mgr ctrl.Manager,
+	maxConcurrentReconciles uint,
+	dependenciesSource source.Source,
+	updateSrc source.Source,
+	templateSrc source.Source,
 ) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		WithOptions(controller.Options{MaxConcurrentReconciles: int(maxConcurrentReconciles)}).
@@ -30,18 +29,9 @@ func (r *ReplicatedPolicyReconciler) SetupWithManager(
 		For(
 			&policiesv1.Policy{},
 			builder.WithPredicates(replicatedPolicyPredicates(r.ResourceVersions))).
-		WatchesRawSource(
-			dynWatcherSrc,
-			// The dependency-watcher could create an event before the same sort of watch in the
-			// controller-runtime triggers an update in the cache. This tries to ensure the cache is
-			// updated before the reconcile is triggered.
-			&delayGeneric{
-				EventHandler: &handler.EnqueueRequestForObject{},
-				delay:        time.Second * 3,
-			}).
-		WatchesRawSource(
-			updateSrc,
-			&handler.EnqueueRequestForObject{}).
+		WatchesRawSource(dependenciesSource, &handler.EnqueueRequestForObject{}).
+		WatchesRawSource(updateSrc, &handler.EnqueueRequestForObject{}).
+		WatchesRawSource(templateSrc, &handler.EnqueueRequestForObject{}).
 		Complete(r)
 }
 
@@ -152,15 +142,3 @@ func safeWriteLoad(resourceVersions *sync.Map, key string) *lockingRsrcVersion {
 
 	return newRsrc
 }
-
-type delayGeneric struct {
-	handler.EventHandler
-	delay time.Duration
-}
-
-func (d *delayGeneric) Generic(_ context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) {
-	q.AddAfter(reconcile.Request{NamespacedName: types.NamespacedName{
-		Name:      evt.Object.GetName(),
-		Namespace: evt.Object.GetNamespace(),
-	}}, d.delay)
-}
diff --git a/go.mod b/go.mod
index 9548a780..a58f6e47 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,6 @@ go 1.20
 
 require (
 	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
-	github.com/go-logr/logr v1.2.4
 	github.com/go-logr/zapr v1.2.4
 	github.com/golang-migrate/migrate/v4 v4.16.2
 	github.com/google/go-cmp v0.5.9
@@ -14,8 +13,8 @@ require (
 	github.com/prometheus/client_golang v1.15.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stolostron/go-log-utils v0.1.2
-	github.com/stolostron/go-template-utils/v3 v3.3.0
-	github.com/stolostron/kubernetes-dependency-watches v0.3.0
+	github.com/stolostron/go-template-utils/v4 v4.0.0
+	github.com/stolostron/kubernetes-dependency-watches v0.5.1
 	github.com/stretchr/testify v1.8.1
 	k8s.io/api v0.27.2
 	k8s.io/apimachinery v0.27.2
@@ -37,6 +36,7 @@ require (
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
@@ -52,7 +52,6 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/huandu/xstrings v1.4.0 // indirect
 	github.com/imdario/mergo v0.3.15 // indirect
-	github.com/jellydator/ttlcache/v3 v3.0.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
@@ -75,7 +74,6 @@ require (
 	golang.org/x/crypto v0.8.0 // indirect
 	golang.org/x/net v0.10.0 // indirect
 	golang.org/x/oauth2 v0.7.0 // indirect
-	golang.org/x/sync v0.2.0 // indirect
 	golang.org/x/sys v0.8.0 // indirect
 	golang.org/x/term v0.8.0 // indirect
 	golang.org/x/text v0.9.0 // indirect
diff --git a/go.sum b/go.sum
index aa4eddf8..559a09ea 100644
--- a/go.sum
+++ b/go.sum
@@ -123,8 +123,6 @@ github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
 github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
 github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
-github.com/jellydator/ttlcache/v3 v3.0.1 h1:cHgCSMS7TdQcoprXnWUptJZzyFsqs18Lt8VVhRuZYVU=
-github.com/jellydator/ttlcache/v3 v3.0.1/go.mod h1:WwTaEmcXQ3MTjOm4bsZoDFiCu/hMvNWLO1w67RXz6h4=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -196,10 +194,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
 github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
 github.com/stolostron/go-log-utils v0.1.2 h1:7l1aJWvBqU2+DUyimcslT5SJpdygVY/clRDmX5sO29c=
 github.com/stolostron/go-log-utils v0.1.2/go.mod h1:8zrB8UJmp1rXhv3Ck9bBl5SpNfKk3SApeElbo96YRtQ=
-github.com/stolostron/go-template-utils/v3 v3.3.0 h1:A+Se/7RekQe1h1HkIwG/XLwo7HIG7fs4Nnn52gww1Lg=
-github.com/stolostron/go-template-utils/v3 v3.3.0/go.mod h1:D9uOV787ztGsIqh4FxZzkPfBQgy2zz9vK0cv8uDO5vM=
-github.com/stolostron/kubernetes-dependency-watches v0.3.0 h1:6Ko9bj1EvtavgkpKQdpG1fOgtVkMNBzr2QsBsLNvWdo=
-github.com/stolostron/kubernetes-dependency-watches v0.3.0/go.mod h1:u2NLFgX12/XwNJvxBpqEhfwGwx8dHWGPxzpDy5L3DIk=
+github.com/stolostron/go-template-utils/v4 v4.0.0 h1:gvSfhXIoymo5Ql9MH/ofTTOtBVkaUBq8HokCGR4xkkc=
+github.com/stolostron/go-template-utils/v4 v4.0.0/go.mod h1:svIOPUJpG/ObRn3WwZMBGMEMsBgKH8LVfhsaIArgAAQ=
+github.com/stolostron/kubernetes-dependency-watches v0.5.1 h1:NZ9N5/VWtwKcawgg4TGI4A5+weSkLrXZMjU7w91xfvU=
+github.com/stolostron/kubernetes-dependency-watches v0.5.1/go.mod h1:8vRsL1GGBw0jjCwP8CH8d30NVNYKhUy0rdBSQZ2znx8=
 github.com/stolostron/multicloud-operators-subscription v1.2.4-0-20211122-7277a37.0.20230502154953-3ef589e142b4 h1:84Yzeji2klz4ajM0pSXoSrbRIC8a36EK2t5Cg8GgApY=
 github.com/stolostron/multicloud-operators-subscription v1.2.4-0-20211122-7277a37.0.20230502154953-3ef589e142b4/go.mod h1:0YDADTwQiNoLc7ihyHhTaCNAxx9VSVvrTUQf3W+AyGk=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -273,8 +271,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/main.go b/main.go
index 20a3a457..285ccd9a 100644
--- a/main.go
+++ b/main.go
@@ -16,13 +16,13 @@ import (
 	"github.com/go-logr/zapr"
 	"github.com/spf13/pflag"
 	"github.com/stolostron/go-log-utils/zaputil"
+	templates "github.com/stolostron/go-template-utils/v4/pkg/templates"
 	k8sdepwatches "github.com/stolostron/kubernetes-dependency-watches/client"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	k8sruntime "k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	"k8s.io/klog/v2"
@@ -250,6 +250,7 @@ func main() {
 
 	controllerCtx := ctrl.SetupSignalHandler()
 
+	// This is used to trigger reconciles when a policy set that a policy depends on changes.
 	dynamicWatcherReconciler, dynamicWatcherSource := k8sdepwatches.NewControllerRuntimeSource()
 
 	dynamicWatcher, err := k8sdepwatches.New(cfg, dynamicWatcherReconciler, nil)
@@ -292,11 +293,29 @@ func main() {
 		os.Exit(1)
 	}
 
+	templateResolver, templatesSource, err := templates.NewResolverWithCaching(
+		controllerCtx,
+		cfg,
+		templates.Config{
+			AdditionalIndentation: 8,
+			DisabledFunctions:     []string{},
+			StartDelim:            propagatorctrl.TemplateStartDelim,
+			StopDelim:             propagatorctrl.TemplateStopDelim,
+		},
+	)
+	if err != nil {
+		log.Error(err, "Unable to set up the template resolver for the controller", "controller", "replicated-policy")
+		os.Exit(1)
+	}
+
 	if err = (&propagatorctrl.ReplicatedPolicyReconciler{
 		Propagator:       propagator,
 		ResourceVersions: replicatedResourceVersions,
 		DynamicWatcher:   dynamicWatcher,
-	}).SetupWithManager(mgr, replPolicyMaxConcurrency, dynamicWatcherSource, replicatedUpdatesSource); err != nil {
+		TemplateResolver: templateResolver,
+	}).SetupWithManager(
+		mgr, replPolicyMaxConcurrency, dynamicWatcherSource, replicatedUpdatesSource, templatesSource,
+	); err != nil {
 		log.Error(err, "Unable to create the controller", "controller", "replicated-policy")
 		os.Exit(1)
 	}
@@ -367,11 +386,6 @@ func main() {
 		os.Exit(1)
 	}
 
-	// Setup config and client for propagator to talk to the apiserver
-	var generatedClient kubernetes.Interface = kubernetes.NewForConfigOrDie(mgr.GetConfig())
-
-	propagatorctrl.Initialize(cfg, &generatedClient)
-
 	cache := mgr.GetCache()
 
 	// The following index for the PlacementRef Name is being added to the