From 4718c342148ff599b0688a87a7b27a9f58d0eb85 Mon Sep 17 00:00:00 2001 From: Justin Kulikauskas Date: Tue, 7 Nov 2023 10:42:20 -0500 Subject: [PATCH] Handle other changes to PlacementBinding To reduce the number of unnecessary reconciles on replicated policies, when a PlacementBinding is updated, only the added or removed policies are reconciled. However, this meant that when other fields are updated in a PlacementBinding, the affected policies were not being updated accordingly. Now, when there are other changes, all possibly affected replicated policies will be reconciled. Refs: - https://issues.redhat.com/browse/ACM-8515 Signed-off-by: Justin Kulikauskas (cherry picked from commit a06836bba155f9ca30edd0ded268d38d222baf3f) --- .../replicatepolicy_pb_eventHandler.go | 17 +++++- ...ase16_selective_policy_enforcement_test.go | 57 ++++++++++++++++++- 2 files changed, 72 insertions(+), 2 deletions(-) diff --git a/controllers/propagator/replicatepolicy_pb_eventHandler.go b/controllers/propagator/replicatepolicy_pb_eventHandler.go index b14f2784..1b0917ea 100644 --- a/controllers/propagator/replicatepolicy_pb_eventHandler.go +++ b/controllers/propagator/replicatepolicy_pb_eventHandler.go @@ -49,7 +49,22 @@ func (e *handlerForBinding) Update(ctx context.Context, oldRepPolcies := common.GetRepPoliciesInPlacementBinding(ctx, e.c, oldObj) newRepPolcies := common.GetRepPoliciesInPlacementBinding(ctx, e.c, newObj) - // Send only affected replicated policies + // If the BindingOverrides or subFilter has changed, all new and old policies need to be checked + if newObj.BindingOverrides.RemediationAction != oldObj.BindingOverrides.RemediationAction || + newObj.SubFilter != oldObj.SubFilter { + for _, obj := range newRepPolcies { + q.Add(obj) + } + + // The workqueue will handle any de-duplication.
+ for _, obj := range oldRepPolcies { + q.Add(obj) + } + + return + } + + // Otherwise send only affected replicated policies affectedRepPolicies := common.GetAffectedObjs(oldRepPolcies, newRepPolcies) for _, obj := range affectedRepPolicies { q.Add(obj) diff --git a/test/e2e/case16_selective_policy_enforcement_test.go b/test/e2e/case16_selective_policy_enforcement_test.go index a06a18c8..f3afc2c7 100644 --- a/test/e2e/case16_selective_policy_enforcement_test.go +++ b/test/e2e/case16_selective_policy_enforcement_test.go @@ -76,7 +76,7 @@ var _ = Describe("Test selective policy enforcement", Ordered, func() { ) Expect(err).ToNot(HaveOccurred()) - By("Verifying no replicated policy created on cluster ns managed3") + By("Verifying no replicated policy created on cluster ns managed3 because of the subfilter") Eventually(func() interface{} { replicatedPlc := utils.GetWithTimeout( clientHubDynamic, gvrPolicy, testNamespace+"."+case16PolicyName, @@ -192,5 +192,60 @@ var _ = Describe("Test selective policy enforcement", Ordered, func() { return rootPlc.Object["status"].(map[string]interface{})["placement"] }, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(expectedPlacementStatus)) }) + + It("should propagate to all clusters when the subfilter is removed", func() { + By("Putting all clusters in the case16-test-policy-plr-enforce decisions") + plr := utils.GetWithTimeout( + clientHubDynamic, gvrPlacementRule, case16PolicyName+"-plr-enforce", + testNamespace, true, defaultTimeoutSeconds, + ) + plr.Object["status"] = utils.GeneratePlrStatus("managed1", "managed2", "managed3") + _, err := clientHubDynamic.Resource(gvrPlacementRule).Namespace(testNamespace).UpdateStatus( + context.TODO(), plr, metav1.UpdateOptions{}, + ) + Expect(err).ToNot(HaveOccurred()) + + By("Verifying that the policy on managed3 is still not created") + Consistently(func() interface{} { + return utils.GetWithTimeout( + clientHubDynamic, gvrPolicy, testNamespace+"."+case16PolicyName, + "managed3", false, defaultTimeoutSeconds, + ) + }, "5s", 1).Should(BeNil()) + + By("Removing the subfilter from the binding") + utils.Kubectl("patch", "placementbinding", "case16-test-policy-pb-enforce", "-n", testNamespace, + "--type=json", `-p=[{"op":"remove","path":"/subFilter"}]`) + + By("Verifying the policies exist and are enforcing on all 3 clusters") + for _, clustername := range []string{"managed1", "managed2", "managed3"} { + Eventually(func() interface{} { + replicatedPlc := utils.GetWithTimeout( + clientHubDynamic, gvrPolicy, testNamespace+"."+case16PolicyName, + clustername, true, defaultTimeoutSeconds, + ) + + return replicatedPlc.Object["spec"].(map[string]interface{})["remediationAction"] + }, defaultTimeoutSeconds, 1).Should(Equal("enforce")) + } + }) + + It("should change remediationAction when the bindingOverrides are removed", func() { + By("Removing the overrides from the binding") + utils.Kubectl("patch", "placementbinding", "case16-test-policy-pb-enforce", "-n", testNamespace, + "--type=json", `-p=[{"op":"remove","path":"/bindingOverrides"}]`) + + By("Verifying the policies are informing on all 3 clusters") + for _, clustername := range []string{"managed1", "managed2", "managed3"} { + Eventually(func() interface{} { + replicatedPlc := utils.GetWithTimeout( + clientHubDynamic, gvrPolicy, testNamespace+"."+case16PolicyName, + clustername, true, defaultTimeoutSeconds, + ) + + return replicatedPlc.Object["spec"].(map[string]interface{})["remediationAction"] + }, defaultTimeoutSeconds, 1).Should(Equal("inform")) + } + }) }) })