diff --git a/.prow.yaml b/.prow.yaml index f9b990b01f6..e7315baf9f6 100644 --- a/.prow.yaml +++ b/.prow.yaml @@ -1255,6 +1255,46 @@ presubmits: limits: memory: 6Gi + ######################################################### + # opa e2e tests + ######################################################### + + - name: pre-kubermatic-opa-e2e + run_if_changed: "(go.mod|go.sum|pkg/|.prow.yaml)" + decorate: true + clone_uri: "ssh://git@github.com/kubermatic/kubermatic.git" + labels: + preset-digitalocean: "true" + preset-kubeconfig-ci: "true" + preset-docker-pull: "true" + preset-docker-push: "true" + preset-kind-volume-mounts: "true" + preset-vault: "true" + preset-goproxy: "true" + spec: + containers: + - image: quay.io/kubermatic/e2e-kind:with-conformance-tests-v1.0.23 + command: + - "./hack/ci/run-opa-e2e-tests.sh" + env: + - name: VERSION_TO_TEST + value: v1.20.2 + - name: KUBERMATIC_EDITION + value: ee + - name: SERVICE_ACCOUNT_KEY + valueFrom: + secretKeyRef: + name: e2e-ci + key: serviceAccountSigningKey + securityContext: + privileged: true + resources: + requests: + memory: 4Gi + cpu: 2 + limits: + memory: 6Gi + ######################################################### # misc ######################################################### diff --git a/hack/ci/README.md b/hack/ci/README.md index a57c010a14f..91a5546b56f 100644 --- a/hack/ci/README.md +++ b/hack/ci/README.md @@ -103,6 +103,11 @@ environment. TODO: This needs to be cleaned up greatly and adjusted to the KKP Operator. The presubmit job for this script is currently not used. +## run-opa-e2e-tests.sh + +This script sets up a local KKP installation in kind, deploys a +couple of test Presets and Users and then runs the OPA e2e tests. + ## setup-kind-cluster.sh TBD diff --git a/hack/ci/run-opa-e2e-tests.sh b/hack/ci/run-opa-e2e-tests.sh new file mode 100755 index 00000000000..69e7c8dc366 --- /dev/null +++ b/hack/ci/run-opa-e2e-tests.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# Copyright 2021 The Kubermatic Kubernetes Platform contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +### This script sets up a local KKP installation in kind, deploys a +### couple of test Presets and Users and then runs the OPA e2e tests. + +set -euo pipefail + +cd $(dirname $0)/../.. +source hack/lib.sh + +TEST_NAME="Pre-warm Go build cache" +echodate "Attempting to pre-warm Go build cache" + +beforeGocache=$(nowms) +make download-gocache +pushElapsed gocache_download_duration_milliseconds $beforeGocache + +export KIND_CLUSTER_NAME="${SEED_NAME:-kubermatic}" + +source hack/ci/setup-kind-cluster.sh +source hack/ci/setup-kubermatic-in-kind.sh + +echodate "Creating UI DigitalOcean preset..." +cat << EOF > preset-digitalocean.yaml +apiVersion: kubermatic.k8s.io/v1 +kind: Preset +metadata: + name: e2e-digitalocean + namespace: kubermatic +spec: + digitalocean: + token: ${DO_E2E_TESTS_TOKEN} +EOF +retry 2 kubectl apply -f preset-digitalocean.yaml + +echodate "Creating roxy2 user..." 
+cat << EOF > user.yaml +apiVersion: kubermatic.k8s.io/v1 +kind: User +metadata: + name: c41724e256445bf133d6af1168c2d96a7533cd437618fdbe6dc2ef1fee97acd3 +spec: + email: roxy2@loodse.com + id: 1413636a43ddc27da27e47614faedff24b4ab19c9d9f2b45dd1b89d9_KUBE + name: roxy2 + admin: true +EOF +retry 2 kubectl apply -f user.yaml + +echodate "Running opa tests..." +go test -timeout 30m -tags e2e -v ./pkg/test/e2e/opa -kubeconfig "$KUBECONFIG" +echodate "Tests completed successfully!" diff --git a/pkg/test/e2e/opa/opa_test.go b/pkg/test/e2e/opa/opa_test.go new file mode 100644 index 00000000000..542ea32cfb8 --- /dev/null +++ b/pkg/test/e2e/opa/opa_test.go @@ -0,0 +1,330 @@ +// +build e2e + +/* +Copyright 2021 The Kubermatic Kubernetes Platform contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opa + +import ( + "context" + "flag" + "fmt" + "strings" + "testing" + "time" + + constrainttemplatev1beta1 "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1beta1" + + kubermaticv1 "k8c.io/kubermatic/v2/pkg/crd/kubermatic/v1" + "k8c.io/kubermatic/v2/pkg/test/e2e/utils" + "k8s.io/client-go/kubernetes/scheme" + + corev1 "k8s.io/api/core/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/tools/clientcmd" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + datacenter = "kubermatic" + location = "do-fra1" + version = utils.KubernetesVersion() + credential = "e2e-digitalocean" + kubeconfig = flag.String("kubeconfig", "", "kubeconfig for the Seed cluster") + ctKind = "RequiredLabels" +) + +func TestOPAIntegration(t *testing.T) { + ctx := context.Background() + + if err := constrainttemplatev1beta1.AddToSchemes.AddToScheme(scheme.Scheme); err != nil { + t.Fatalf("failed to register gatekeeper scheme: %v", err) + } + + // validate kubeconfig by creating a client + if kubeconfig == nil { + t.Fatal("-kubeconfig must be specified and pointing to the Seed cluster") + } + + t.Log("creating client for Seed cluster...") + config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig) + if err != nil { + t.Fatalf("failed to create Seed cluster client: %v", err) + } + + client, err := ctrlruntimeclient.New(config, ctrlruntimeclient.Options{Scheme: scheme.Scheme}) + if err != nil { + t.Fatalf("failed to create Seed cluster client: %v", err) + } + + // login + masterToken, err := utils.RetrieveMasterToken(ctx) + if err != nil { + t.Fatalf("failed to get master token: %v", err) + } + testClient := utils.NewTestClient(masterToken, t) + + // create dummy project + t.Log("creating project...") + project, err := testClient.CreateProject(rand.String(10)) + if err != nil { + t.Fatalf("failed to create project: %v", err) + } + defer cleanupProject(t, project.ID) + + t.Log("creating cluster...") + apiCluster, err := testClient.CreateDOCluster(project.ID, datacenter, rand.String(10), credential, version, 
+		location, 0)
+	if err != nil {
+		t.Fatalf("failed to create cluster: %v", err)
+	}
+
+	// wait for the cluster to become healthy
+	if err := testClient.WaitForClusterHealthy(project.ID, datacenter, apiCluster.ID); err != nil {
+		t.Fatalf("cluster did not become healthy: %v", err)
+	}
+
+	// get the cluster object (the CRD, not the API's representation)
+	cluster := &kubermaticv1.Cluster{}
+	if err := client.Get(ctx, types.NamespacedName{Name: apiCluster.ID}, cluster); err != nil {
+		t.Fatalf("failed to get cluster: %v", err)
+	}
+
+	// enable OPA
+	t.Log("enabling OPA...")
+	if err := setOPAIntegration(ctx, client, cluster, true); err != nil {
+		t.Fatalf("failed to set OPA integration to true: %v", err)
+	}
+
+	t.Log("waiting for cluster to become healthy after enabling OPA...")
+	if err := testClient.WaitForOPAEnabledClusterHealthy(project.ID, datacenter, apiCluster.ID); err != nil {
+		t.Fatalf("cluster not ready: %v", err)
+	}
+
+	// Create CT
+	t.Log("creating Constraint Template...")
+	ct, err := createCT(ctx, client)
+	if err != nil {
+		t.Fatalf("error creating Constraint Template: %v", err)
+	}
+
+	// Check CT on user cluster
+	t.Log("creating client for user cluster...")
+	userClient, err := testClient.GetUserClusterClient(datacenter, project.ID, apiCluster.ID)
+	if err != nil {
+		t.Fatalf("error creating user cluster client: %v", err)
+	}
+	if err := waitForCTSync(ctx, userClient, ct.Name, false); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create Constraint
+	t.Log("creating Constraint...")
+	constraint, err := createConstraint(ctx, client, cluster.Status.NamespaceName, ctKind)
+	if err != nil {
+		t.Fatalf("error creating Constraint: %v", err)
+	}
+
+	// Check Constraint
+	t.Log("waiting for Constraint sync...")
+	if err := waitForConstraintSync(ctx, client, constraint.Name, constraint.Namespace, false); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test if constraint works
+	t.Log("testing if Constraint works by creating policy-breaking configmap...")
+	if err := testConstraintForConfigMap(ctx, userClient); err != nil {
+		t.Fatal(err)
+	}
+
+	t.Log("testing if Constraint lets through policy-aligned configmap...")
+	cm := genTestConfigMap()
+	cm.Labels = map[string]string{"gatekeeper": "true"}
+	if err := userClient.Create(ctx, cm); err != nil {
+		t.Fatalf("error creating policy-aligned configmap on user cluster: %v", err)
+	}
+
+	// Delete constraint
+	t.Log("deleting Constraint...")
+	if err := client.Delete(ctx, constraint); err != nil {
+		t.Fatalf("error deleting Constraint: %v", err)
+	}
+	t.Log("waiting for Constraint delete sync...")
+	if err := waitForConstraintSync(ctx, client, constraint.Name, constraint.Namespace, true); err != nil {
+		t.Fatal(err)
+	}
+
+	// Check that constraint does not work anymore
+	t.Log("testing if policy-breaking configmap can be created...")
+	cmBreaking := genTestConfigMap()
+	if err := userClient.Create(ctx, cmBreaking); err != nil {
+		t.Fatalf("error creating policy-breaking configmap on user cluster after deleting constraint: %v", err)
+	}
+
+	// Delete CT
+	t.Log("deleting Constraint Template...")
+	if err := client.Delete(ctx, ct); err != nil {
+		t.Fatalf("error deleting Constraint Template: %v", err)
+	}
+
+	// Check that CT is removed
+	t.Log("waiting for Constraint Template delete sync...")
+	if err := waitForCTSync(ctx, userClient, ct.Name, true); err != nil {
+		t.Fatal(err)
+	}
+
+	// Disable OPA Integration
+	t.Log("disabling OPA...")
+	if err := setOPAIntegration(ctx, client, cluster, false); err != nil {
+		t.Fatalf("failed to set OPA integration to false: %v", err)
+	}
+
+	// Check that cluster is healthy
+	t.Log("waiting for cluster to become healthy after disabling OPA...")
+	if err := testClient.WaitForClusterHealthy(project.ID, datacenter, apiCluster.ID); err != nil {
+		t.Fatalf("cluster not healthy: %v", err)
+	}
+
+	// Test that cluster deletes cleanly
+	testClient.CleanupCluster(t, project.ID, datacenter, apiCluster.ID)
+}
+
+func setOPAIntegration(ctx context.Context, client ctrlruntimeclient.Client, cluster *kubermaticv1.Cluster, enabled bool) error {
+	oldCluster := cluster.DeepCopy()
+	cluster.Spec.OPAIntegration = &kubermaticv1.OPAIntegrationSettings{
+		Enabled: enabled,
+	}
+
+	return client.Patch(ctx, cluster, ctrlruntimeclient.MergeFrom(oldCluster))
+}
+
+func testConstraintForConfigMap(ctx context.Context, userClient ctrlruntimeclient.Client) error {
+	if !utils.WaitFor(1*time.Second, 1*time.Minute, func() bool {
+		cm := genTestConfigMap()
+		err := userClient.Create(ctx, cm)
+		return err != nil && strings.Contains(err.Error(), "you must provide labels")
+	}) {
+		return fmt.Errorf("timeout waiting for Constraint policy to be enforced")
+	}
+	return nil
+}
+
+func waitForCTSync(ctx context.Context, userClient ctrlruntimeclient.Client, ctName string, deleted bool) error {
+	if !utils.WaitFor(1*time.Second, 1*time.Minute, func() bool {
+		gatekeeperCT := &constrainttemplatev1beta1.ConstraintTemplate{}
+		err := userClient.Get(ctx, types.NamespacedName{Name: ctName}, gatekeeperCT)
+
+		if deleted {
+			return kerrors.IsNotFound(err)
+		}
+		return err == nil
+	}) {
+		return fmt.Errorf("timeout waiting for Constraint Template to be synced to user cluster")
+	}
+	return nil
+}
+
+func waitForConstraintSync(ctx context.Context, client ctrlruntimeclient.Client, cName, namespace string, deleted bool) error {
+	if !utils.WaitFor(1*time.Second, 1*time.Minute, func() bool {
+		constraint := &kubermaticv1.Constraint{}
+		err := client.Get(ctx, types.NamespacedName{Name: cName, Namespace: namespace}, constraint)
+		if deleted {
+			return kerrors.IsNotFound(err)
+		}
+		return err == nil
+	}) {
+		return fmt.Errorf("timeout waiting for Constraint to be synced")
+	}
+	return nil
+}
+
+func createConstraint(ctx context.Context, client ctrlruntimeclient.Client, namespace, kind string) (*kubermaticv1.Constraint, error) {
+	c := &kubermaticv1.Constraint{}
+	c.Kind = kubermaticv1.ConstraintKind
+	c.Name = "testconstraint"
+	c.Namespace = namespace
+	c.Spec = kubermaticv1.ConstraintSpec{
+		ConstraintType: kind,
+		Match: kubermaticv1.Match{
+			Kinds: []kubermaticv1.Kind{
+				{Kinds: []string{"ConfigMap"}, APIGroups: []string{""}},
+			},
+		},
+		Parameters: kubermaticv1.Parameters{
+			RawJSON: `{"labels":["gatekeeper"]}`,
+		},
+	}
+
+	return c, client.Create(ctx, c)
+}
+
+func createCT(ctx context.Context, client ctrlruntimeclient.Client) (*kubermaticv1.ConstraintTemplate, error) {
+	ct := &kubermaticv1.ConstraintTemplate{}
+	ct.Name = "requiredlabels"
+	ct.Spec = kubermaticv1.ConstraintTemplateSpec{
+		CRD: constrainttemplatev1beta1.CRD{
+			Spec: constrainttemplatev1beta1.CRDSpec{
+				Names: constrainttemplatev1beta1.Names{
+					Kind: ctKind,
+				},
+				Validation: &constrainttemplatev1beta1.Validation{
+					OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{
+						Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
+							"labels": {
+								Type: "array",
+								Items: &apiextensionsv1beta1.JSONSchemaPropsOrArray{
+									Schema: &apiextensionsv1beta1.JSONSchemaProps{
+										Type: "string",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+		Targets: []constrainttemplatev1beta1.Target{
+			{
Target: "admission.k8s.gatekeeper.sh", + Rego: "package requiredlabels\nviolation[{\"msg\": msg, \"details\": {\"missing_labels\": missing}}] {\n provided := {label | input.review.object.metadata.labels[label]}\n required := {label | label := input.parameters.labels[_]}\n missing := required - provided\n count(missing) > 0\n msg := sprintf(\"you must provide labels: %v\", [missing])\n}", + }, + }, + } + + return ct, client.Create(ctx, ct) +} + +func genTestConfigMap() *corev1.ConfigMap { + cm := &corev1.ConfigMap{} + cm.Namespace = corev1.NamespaceDefault + cm.Name = fmt.Sprintf("test-cm-%d", rand.Int()) + return cm +} + +func cleanupProject(t *testing.T, id string) { + t.Log("cleaning up project and cluster...") + + // use a dedicated context so that cleanups always run, even + // if the context inside a test was already cancelled + token, err := utils.RetrieveAdminMasterToken(context.Background()) + if err != nil { + t.Fatalf("failed to get master token: %v", err) + } + + utils.NewTestClient(token, t).CleanupProject(t, id) +} diff --git a/pkg/test/e2e/utils/client.go b/pkg/test/e2e/utils/client.go index f11981b613a..da46cbfe125 100644 --- a/pkg/test/e2e/utils/client.go +++ b/pkg/test/e2e/utils/client.go @@ -28,9 +28,15 @@ import ( "github.com/Masterminds/semver/v3" "github.com/go-openapi/runtime" httptransport "github.com/go-openapi/runtime/client" + "k8s.io/client-go/tools/clientcmd" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + + "k8s.io/client-go/kubernetes/scheme" + apiv1 "k8c.io/kubermatic/v2/pkg/api/v1" + apiv2 "k8c.io/kubermatic/v2/pkg/api/v2" kubermaticv1 "k8c.io/kubermatic/v2/pkg/crd/kubermatic/v1" apiclient "k8c.io/kubermatic/v2/pkg/test/e2e/utils/apiclient/client" "k8c.io/kubermatic/v2/pkg/test/e2e/utils/apiclient/client/admin" @@ -580,6 +586,8 @@ func (r *TestClient) GetClusterHealthStatus(projectID, dc, clusterID string) (*a apiClusterHealth.MachineController = convertHealthStatus(response.Payload.MachineController) apiClusterHealth.Scheduler = convertHealthStatus(response.Payload.Scheduler) apiClusterHealth.UserClusterControllerManager = convertHealthStatus(response.Payload.UserClusterControllerManager) + apiClusterHealth.GatekeeperController = convertHealthStatus(response.Payload.GatekeeperController) + apiClusterHealth.GatekeeperAudit = convertHealthStatus(response.Payload.GatekeeperAudit) return apiClusterHealth, nil } @@ -601,6 +609,26 @@ func (r *TestClient) WaitForClusterHealthy(projectID, dc, clusterID string) erro return nil } +func (r *TestClient) WaitForOPAEnabledClusterHealthy(projectID, dc, clusterID string) error { + timeout := 5 * time.Minute + before := time.Now() + + r.test.Logf("Waiting %v for OPA enabled cluster %s to become healthy...", timeout, clusterID) + + if !WaitFor(5*time.Second, timeout, func() bool { + healthStatus, _ := r.GetClusterHealthStatus(projectID, dc, clusterID) + return IsHealthyCluster(healthStatus) && + healthStatus.GatekeeperController == kubermaticv1.HealthStatusUp && + healthStatus.GatekeeperAudit == kubermaticv1.HealthStatusUp + + }) { + return errors.New("OPA enabled cluster did not become healthy") + } + + r.test.Logf("OPA enabled cluster became healthy after %v", time.Since(before)) + return nil +} + func convertHealthStatus(status models.HealthStatus) kubermaticv1.HealthStatus { switch int64(status) { case int64(kubermaticv1.HealthStatusProvisioning): @@ -1287,3 +1315,49 @@ func (r *TestClient) GetKubeconfig(dc, projectID, clusterID 
string) (string, err return string(conf.Payload), nil } + +func (r *TestClient) GetUserClusterClient(dc, projectID, clusterID string) (ctrlruntimeclient.Client, error) { + userClusterKubeconfig, err := r.GetKubeconfig(dc, projectID, clusterID) + if err != nil { + return nil, err + } + config, err := clientcmd.RESTConfigFromKubeConfig([]byte(userClusterKubeconfig)) + if err != nil { + return nil, err + } + + return ctrlruntimeclient.New(config, ctrlruntimeclient.Options{Scheme: scheme.Scheme}) +} + +// GetConstraint gets the constraint with the given name, project and cluster; it does not perform any +// retries if the API returns errors. +func (r *TestClient) GetConstraint(projectID, clusterID, name string) (*apiv2.Constraint, error) { + params := &project.GetConstraintParams{ + ProjectID: projectID, + ClusterID: clusterID, + Name: name, + } + + SetupRetryParams(r.test, params, Backoff{ + Duration: 1 * time.Second, + Steps: 4, + Factor: 1.5, + }) + + project, err := r.client.Project.GetConstraint(params, r.bearerToken) + if err != nil { + return nil, err + } + + return convertConstraint(project.Payload) +} + +func convertConstraint(constraint *models.Constraint) (*apiv2.Constraint, error) { + apiConstraint := &apiv2.Constraint{} + apiConstraint.Name = constraint.Name + apiConstraint.Spec = kubermaticv1.ConstraintSpec{ + ConstraintType: constraint.Spec.ConstraintType, + } + + return apiConstraint, nil +}
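
The new GetConstraint helper added to pkg/test/e2e/utils/client.go is not yet exercised by the test above. A minimal sketch of a follow-up check, assuming it lives in the opa test package next to TestOPAIntegration; the verifyConstraintViaAPI helper name and its assertion are hypothetical and not part of this change:

func verifyConstraintViaAPI(t *testing.T, testClient *utils.TestClient, projectID, clusterID string) {
	// fetch the Constraint through the KKP API; retries are configured inside GetConstraint
	constraint, err := testClient.GetConstraint(projectID, clusterID, "testconstraint")
	if err != nil {
		t.Fatalf("failed to get Constraint via API: %v", err)
	}
	// the API representation should carry the same constraint type the test created (ctKind)
	if constraint.Spec.ConstraintType != ctKind {
		t.Fatalf("expected constraint type %q, got %q", ctKind, constraint.Spec.ConstraintType)
	}
}

Called from TestOPAIntegration right after waitForConstraintSync succeeds, this would verify the API read path in addition to the CRD-level checks.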