diff --git a/.github/actions/e2e/action.yml b/.github/actions/e2e/action.yml index b7c4461bb..811c3543c 100644 --- a/.github/actions/e2e/action.yml +++ b/.github/actions/e2e/action.yml @@ -37,6 +37,9 @@ inputs: k0s-version-previous: description: 'k0s previous version to expect in e2e tests' required: true + k0s-version-previous-stable: + description: 'k0s previous stable version to expect in e2e tests' + required: true version-specifier: description: 'the git sha or tag used to generate application version strings' required: true @@ -106,6 +109,7 @@ runs: export DR_AWS_SECRET_ACCESS_KEY=${{ inputs.dr-aws-secret-access-key }} export EXPECT_K0S_VERSION=${{ inputs.k0s-version }} export EXPECT_K0S_VERSION_PREVIOUS=${{ inputs.k0s-version-previous }} + export EXPECT_K0S_VERSION_PREVIOUS_STABLE=${{ inputs.k0s-version-previous-stable }} make e2e-test TEST_NAME=${{ inputs.test-name }} - name: Troubleshoot if: ${{ !cancelled() }} diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index bd7b734f8..f9fd1e03b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -255,6 +255,37 @@ jobs: echo "K0S_VERSION=\"$K0S_VERSION\"" echo "k0s_version=$K0S_VERSION" >> "$GITHUB_OUTPUT" + build-previous-stable: + name: Build previous stable + runs-on: embedded-cluster + needs: + - git-sha + outputs: + ec_version: ${{ steps.export.outputs.ec_version }} + k0s_version: ${{ steps.export.outputs.k0s_version }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Export k0s version + id: export + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + k0s_majmin_version="$(make print-PREVIOUS_K0S_VERSION | sed 's/v\([0-9]*\.[0-9]*\).*/\1/')" + EC_VERSION="$(gh release list --repo replicatedhq/embedded-cluster \ + --exclude-drafts --exclude-pre-releases --json name \ + --jq '.[] | .name' \ + | grep "k8s-${k0s_majmin_version}" \ + | head -n1)" + + gh release download "$EC_VERSION" --repo replicatedhq/embedded-cluster --pattern 'metadata.json' + 
K0S_VERSION="$(jq -r '.Versions.Kubernetes' metadata.json)" + + echo "EC_VERSION=\"$EC_VERSION\"" + echo "K0S_VERSION=\"$K0S_VERSION\"" + echo "ec_version=$EC_VERSION" >> "$GITHUB_OUTPUT" + echo "k0s_version=$K0S_VERSION" >> "$GITHUB_OUTPUT" + build-upgrade: name: Build upgrade runs-on: embedded-cluster @@ -387,6 +418,7 @@ jobs: - build-current - build-previous-k0s - build-upgrade + - build-previous-stable steps: - name: Checkout uses: actions/checkout@v4 @@ -406,6 +438,7 @@ jobs: REPLICATED_API_ORIGIN: "https://api.staging.replicated.com/vendor" APP_CHANNEL: CI USES_DEV_BUCKET: "0" + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | export SHORT_SHA=dev-${{ needs.git-sha.outputs.git_sha }} @@ -418,6 +451,12 @@ jobs: replicated release promote 11615 2cHXb1RCttzpR0xvnNWyaZCgDBP --version "${APP_VERSION}" replicated release promote 11615 2eAqMYG1IEtX8cwpaO1kgNV6EB3 --version "${APP_VERSION}" + # promote a release containing the previous stable version of embedded-cluster to test upgrades + export EC_VERSION="${{ needs.build-previous-stable.outputs.ec_version }}" + export APP_VERSION="appver-${SHORT_SHA}-previous-stable" + export RELEASE_YAML_DIR=e2e/kots-release-install-stable + ./scripts/ci-release-app.sh + # install the previous k0s version to ensure an upgrade occurs export EC_VERSION="$(git describe --tags --match='[0-9]*.[0-9]*.[0-9]*')-previous-k0s" export APP_VERSION="appver-${SHORT_SHA}-previous-k0s" @@ -452,6 +491,12 @@ jobs: run: | export SHORT_SHA=dev-${{ needs.git-sha.outputs.git_sha }} + # promote a release containing the previous stable version of embedded-cluster to test upgrades + export EC_VERSION="${{ needs.build-previous-stable.outputs.ec_version }}" + export APP_VERSION="appver-${SHORT_SHA}-previous-stable" + export RELEASE_YAML_DIR=e2e/kots-release-install-stable + ./scripts/ci-release-app.sh + # install the previous k0s version to ensure an upgrade occurs export EC_VERSION="$(git describe --tags --match='[0-9]*.[0-9]*.[0-9]*')-previous-k0s" export 
APP_VERSION="appver-${SHORT_SHA}-previous-k0s" @@ -505,6 +550,7 @@ jobs: - build-current - build-previous-k0s - build-upgrade + - build-previous-stable - release-app strategy: fail-fast: false @@ -519,6 +565,7 @@ jobs: - TestSingleNodeInstallationDebian11 - TestSingleNodeInstallationDebian12 - TestSingleNodeInstallationCentos9Stream + - TestSingleNodeUpgradePreviousStable - TestInstallFromReplicatedApp - TestUpgradeFromReplicatedApp - TestInstallWithoutEmbed @@ -578,6 +625,7 @@ jobs: DR_AWS_SECRET_ACCESS_KEY: ${{ secrets.TESTIM_AWS_SECRET_ACCESS_KEY }} EXPECT_K0S_VERSION: ${{ needs.build-current.outputs.k0s_version }} EXPECT_K0S_VERSION_PREVIOUS: ${{ needs.build-previous-k0s.outputs.k0s_version }} + EXPECT_K0S_VERSION_PREVIOUS_STABLE: ${{ needs.build-previous-stable.outputs.k0s_version }} run: | make e2e-test TEST_NAME=${{ matrix.test }} - name: Troubleshoot @@ -593,6 +641,7 @@ jobs: - build-current - build-previous-k0s - build-upgrade + - build-previous-stable - release-app - export-version-specifier strategy: @@ -614,6 +663,8 @@ jobs: runner: embedded-cluster - test: TestMultiNodeAirgapUpgradeSameK0s runner: embedded-cluster + - test: TestMultiNodeAirgapUpgradePreviousStable + runner: embedded-cluster - test: TestAirgapUpgradeFromEC18 runner: embedded-cluster - test: TestSingleNodeAirgapDisasterRecovery @@ -647,6 +698,7 @@ jobs: dr-aws-secret-access-key: ${{ secrets.TESTIM_AWS_SECRET_ACCESS_KEY }} k0s-version: ${{ needs.build-current.outputs.k0s_version }} k0s-version-previous: ${{ needs.build-previous-k0s.outputs.k0s_version }} + k0s-version-previous-stable: ${{ needs.build-previous-stable.outputs.k0s_version }} version-specifier: ${{ needs.export-version-specifier.outputs.version_specifier }} # this job will validate that all the tests passed diff --git a/e2e/install_test.go b/e2e/install_test.go index 4a1accb9f..33ca99bdf 100644 --- a/e2e/install_test.go +++ b/e2e/install_test.go @@ -485,6 +485,57 @@ func TestInstallFromReplicatedApp(t *testing.T) { 
t.Logf("%s: test complete", time.Now().Format(time.RFC3339)) } +func TestSingleNodeUpgradePreviousStable(t *testing.T) { + t.Parallel() + + RequireEnvVars(t, []string{"SHORT_SHA"}) + + tc := docker.NewCluster(&docker.ClusterInput{ + T: t, + Nodes: 1, + Distro: "debian-bookworm", + }) + defer tc.Cleanup() + + t.Logf("%s: downloading embedded-cluster on node 0", time.Now().Format(time.RFC3339)) + line := []string{"vandoor-prepare.sh", fmt.Sprintf("appver-%s-previous-stable", os.Getenv("SHORT_SHA")), os.Getenv("LICENSE_ID"), "false"} + if stdout, stderr, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to download embedded-cluster on node 0: %v: %s: %s", err, stdout, stderr) + } + + t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) + line = []string{"single-node-install.sh", "ui"} + if stdout, stderr, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to install embedded-cluster on node 0: %v: %s: %s", err, stdout, stderr) + } + + if stdout, stderr, err := tc.SetupPlaywrightAndRunTest("deploy-app"); err != nil { + t.Fatalf("fail to run playwright test deploy-app: %v: %s: %s", err, stdout, stderr) + } + + t.Logf("%s: checking installation state", time.Now().Format(time.RFC3339)) + line = []string{"check-installation-state.sh", fmt.Sprintf("appver-%s-previous-stable", os.Getenv("SHORT_SHA")), k8sVersionPreviousStable()} + if stdout, stderr, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to check installation state: %v: %s: %s", err, stdout, stderr) + } + + appUpgradeVersion := fmt.Sprintf("appver-%s-upgrade", os.Getenv("SHORT_SHA")) + testArgs := []string{appUpgradeVersion} + + t.Logf("%s: upgrading cluster", time.Now().Format(time.RFC3339)) + if stdout, stderr, err := tc.RunPlaywrightTest("deploy-upgrade", testArgs...); err != nil { + t.Fatalf("fail to run playwright test deploy-upgrade: %v: %s: %s", err, stdout, stderr) + } + + t.Logf("%s: checking installation state after upgrade", 
time.Now().Format(time.RFC3339)) + line = []string{"check-postupgrade-state.sh", k8sVersion()} + if stdout, stderr, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to check postupgrade state: %v: %s: %s", err, stdout, stderr) + } + + t.Logf("%s: test complete", time.Now().Format(time.RFC3339)) +} + func TestUpgradeFromReplicatedApp(t *testing.T) { t.Parallel() @@ -1513,6 +1564,152 @@ func TestMultiNodeAirgapUpgrade(t *testing.T) { t.Logf("%s: test complete", time.Now().Format(time.RFC3339)) } +func TestMultiNodeAirgapUpgradePreviousStable(t *testing.T) { + t.Parallel() + + RequireEnvVars(t, []string{"SHORT_SHA", "AIRGAP_LICENSE_ID"}) + + t.Logf("%s: downloading airgap files", time.Now().Format(time.RFC3339)) + airgapInstallBundlePath := "/tmp/airgap-install-bundle.tar.gz" + airgapUpgradeBundlePath := "/tmp/airgap-upgrade-bundle.tar.gz" + runInParallel(t, + func(t *testing.T) error { + return downloadAirgapBundle(t, fmt.Sprintf("appver-%s-previous-stable", os.Getenv("SHORT_SHA")), airgapInstallBundlePath, os.Getenv("AIRGAP_LICENSE_ID")) + }, func(t *testing.T) error { + return downloadAirgapBundle(t, fmt.Sprintf("appver-%s-upgrade", os.Getenv("SHORT_SHA")), airgapUpgradeBundlePath, os.Getenv("AIRGAP_LICENSE_ID")) + }, + ) + + tc := lxd.NewCluster(&lxd.ClusterInput{ + T: t, + Nodes: 2, + Image: "debian/12", + WithProxy: true, + AirgapInstallBundlePath: airgapInstallBundlePath, + AirgapUpgradeBundlePath: airgapUpgradeBundlePath, + }) + defer tc.Cleanup() + + // install "curl" dependency on node 0 for app version checks. 
+ tc.InstallTestDependenciesDebian(t, 0, true) + + // delete airgap bundles once they've been copied to the nodes + if err := os.Remove(airgapInstallBundlePath); err != nil { + t.Logf("failed to remove airgap install bundle: %v", err) + } + if err := os.Remove(airgapUpgradeBundlePath); err != nil { + t.Logf("failed to remove airgap upgrade bundle: %v", err) + } + + // upgrade airgap bundle is only needed on the first node + line := []string{"rm", "/assets/ec-release-upgrade.tgz"} + if _, _, err := tc.RunCommandOnNode(1, line); err != nil { + t.Fatalf("fail to remove upgrade airgap bundle on node %s: %v", tc.Nodes[1], err) + } + + t.Logf("%s: preparing embedded cluster airgap files on node 0", time.Now().Format(time.RFC3339)) + line = []string{"airgap-prepare.sh"} + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to prepare airgap files on node %s: %v", tc.Nodes[0], err) + } + + t.Logf("%s: installing embedded-cluster on node 0", time.Now().Format(time.RFC3339)) + line = []string{"single-node-airgap-install.sh", "--local-artifact-mirror-port", "50001"} // choose an alternate lam port + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to install embedded-cluster on node %s: %v", tc.Nodes[0], err) + } + // remove the airgap bundle and binary after installation + line = []string{"rm", "/assets/release.airgap"} + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to remove airgap bundle on node %s: %v", tc.Nodes[0], err) + } + line = []string{"rm", "/usr/local/bin/embedded-cluster"} + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to remove embedded-cluster binary on node %s: %v", tc.Nodes[0], err) + } + + if _, _, err := tc.SetupPlaywrightAndRunTest("deploy-app"); err != nil { + t.Fatalf("fail to run playwright test deploy-app: %v", err) + } + + // generate worker node join command. 
+ t.Logf("%s: generating a new worker token command", time.Now().Format(time.RFC3339)) + stdout, stderr, err := tc.RunPlaywrightTest("get-join-worker-command") + if err != nil { + t.Fatalf("fail to generate worker join token:\nstdout: %s\nstderr: %s", stdout, stderr) + } + workerCommand, err := findJoinCommandInOutput(stdout) + if err != nil { + t.Fatalf("fail to find the join command in the output: %v", err) + } + t.Log("worker join token command:", workerCommand) + + // join the worker node + t.Logf("%s: preparing embedded cluster airgap files on worker node", time.Now().Format(time.RFC3339)) + line = []string{"airgap-prepare.sh"} + if _, _, err := tc.RunCommandOnNode(1, line); err != nil { + t.Fatalf("fail to prepare airgap files on worker node: %v", err) + } + t.Logf("%s: joining worker node to the cluster", time.Now().Format(time.RFC3339)) + if _, _, err := tc.RunCommandOnNode(1, strings.Split(workerCommand, " ")); err != nil { + t.Fatalf("fail to join worker node to the cluster: %v", err) + } + // remove the airgap bundle and binary after joining + line = []string{"rm", "/assets/release.airgap"} + if _, _, err := tc.RunCommandOnNode(1, line); err != nil { + t.Fatalf("fail to remove airgap bundle on worker node: %v", err) + } + line = []string{"rm", "/usr/local/bin/embedded-cluster"} + if _, _, err := tc.RunCommandOnNode(1, line); err != nil { + t.Fatalf("fail to remove embedded-cluster binary on worker node: %v", err) + } + + // wait for the nodes to report as ready. 
+ t.Logf("%s: all nodes joined, waiting for them to be ready", time.Now().Format(time.RFC3339)) + stdout, _, err = tc.RunCommandOnNode(0, []string{"wait-for-ready-nodes.sh", "2"}) + if err != nil { + t.Log(stdout) + t.Fatalf("fail to wait for ready nodes: %v", err) + } + + t.Logf("%s: checking installation state after app deployment", time.Now().Format(time.RFC3339)) + line = []string{"check-airgap-installation-state.sh", fmt.Sprintf("appver-%s-previous-stable", os.Getenv("SHORT_SHA")), k8sVersionPreviousStable()} + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to check installation state: %v", err) + } + + t.Logf("%s: running airgap update", time.Now().Format(time.RFC3339)) + line = []string{"airgap-update.sh"} + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to run airgap update: %v", err) + } + // remove the airgap bundle and binary after upgrade + line = []string{"rm", "/assets/upgrade/release.airgap"} + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to remove airgap bundle on node %s: %v", tc.Nodes[0], err) + } + line = []string{"rm", "/usr/local/bin/embedded-cluster-upgrade"} + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to remove embedded-cluster-upgrade binary on node %s: %v", tc.Nodes[0], err) + } + + appUpgradeVersion := fmt.Sprintf("appver-%s-upgrade", os.Getenv("SHORT_SHA")) + testArgs := []string{appUpgradeVersion} + + t.Logf("%s: upgrading cluster", time.Now().Format(time.RFC3339)) + if _, _, err := tc.RunPlaywrightTest("deploy-upgrade", testArgs...); err != nil { + t.Fatalf("fail to run playwright test deploy-upgrade: %v", err) + } + + t.Logf("%s: checking installation state after upgrade", time.Now().Format(time.RFC3339)) + line = []string{"check-postupgrade-state.sh", k8sVersion()} + if _, _, err := tc.RunCommandOnNode(0, line); err != nil { + t.Fatalf("fail to check postupgrade state: %v", err) + } + + t.Logf("%s: test complete", 
time.Now().Format(time.RFC3339)) +} + // This test creates 4 nodes, installs on the first one and then generate 2 join tokens // for controllers and one join token for worker nodes. Joins the nodes as HA and then waits // for them to report ready. Runs additional high availability validations afterwards. diff --git a/e2e/kots-release-install-stable/cluster-config.yaml b/e2e/kots-release-install-stable/cluster-config.yaml new file mode 100644 index 000000000..9686c854e --- /dev/null +++ b/e2e/kots-release-install-stable/cluster-config.yaml @@ -0,0 +1,71 @@ +apiVersion: embeddedcluster.replicated.com/v1beta1 +kind: Config +metadata: + name: "testconfig" +spec: + version: "__version_string__" + roles: + controller: + labels: + controller-label: controller-label-value + name: controller-test + custom: + - labels: + abc-test-label: abc-test-label-value + abc-test-label-two: abc-test-label-value-2 + name: abc + - labels: + xyz-test-label: xyz-value + name: xyz + unsupportedOverrides: + builtInExtensions: + - name: admin-console + values: | + labels: + release-custom-label: release-clustom-value + - name: embedded-cluster-operator + values: | + global: + labels: + release-custom-label: release-clustom-value + k0s: | + config: + metadata: + name: foo + spec: + telemetry: + enabled: false + extensions: + helm: + repositories: + - name: ingress-nginx + url: https://kubernetes.github.io/ingress-nginx + - name: okgolove + url: https://okgolove.github.io/helm-charts/ + charts: + - name: ingress-nginx + chartname: ingress-nginx/ingress-nginx + namespace: ingress-nginx + version: "4.8.3" + values: | + controller: + service: + type: NodePort + nodePorts: + http: "80" + https: "443" + image: + digest: "" + digestChroot: "" + admissionWebhooks: + patch: + image: + digest: "" + - chartname: okgolove/goldpinger + name: goldpinger + namespace: goldpinger + version: 6.1.2 + order: 11 + values: | + image: + repository: proxy.replicated.com/anonymous/bloomberg/goldpinger diff --git 
a/e2e/kots-release-install-stable/config.yaml b/e2e/kots-release-install-stable/config.yaml new file mode 100644 index 000000000..f1c98e69a --- /dev/null +++ b/e2e/kots-release-install-stable/config.yaml @@ -0,0 +1,13 @@ +apiVersion: kots.io/v1beta1 +kind: Config +spec: + groups: + - name: config_group + title: The First Config Group + items: + - name: hostname + title: Hostname + type: text + - name: pw + title: Password + type: password diff --git a/e2e/kots-release-install-stable/deployment-2.yaml b/e2e/kots-release-install-stable/deployment-2.yaml new file mode 100644 index 000000000..8b1d67023 --- /dev/null +++ b/e2e/kots-release-install-stable/deployment-2.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: second + labels: + app: second + replicated.com/disaster-recovery: app +spec: + replicas: 0 + selector: + matchLabels: + app: second + template: + metadata: + labels: + app: second + spec: + containers: + - name: nginx + image: proxy.replicated.com/anonymous/nginx:1.24-alpine + resources: + limits: + memory: '32Mi' + cpu: '50m' diff --git a/e2e/kots-release-install-stable/deployment.yaml b/e2e/kots-release-install-stable/deployment.yaml new file mode 100644 index 000000000..27fd581fd --- /dev/null +++ b/e2e/kots-release-install-stable/deployment.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + labels: + app: example + component: nginx + replicated.com/disaster-recovery: app +spec: + replicas: 1 + selector: + matchLabels: + app: example + component: nginx + template: + metadata: + labels: + app: example + component: nginx + spec: + containers: + - name: nginx + image: proxy.replicated.com/anonymous/nginx:1.24-alpine + resources: + limits: + memory: '64Mi' + cpu: '50m' + env: + - name: APP_SEQUENCE + value: "{{repl Cursor }}" + - name: APP_VERSION + value: "{{repl VersionLabel }}" + - name: APP_CHANNEL + value: "{{repl ChannelName }}" + - name: CONFIG_HOSTNAME + value: '{{repl ConfigOption 
"hostname" }}' + - name: CONFIG_PASSWORD + value: '{{repl ConfigOption "pw" }}' diff --git a/e2e/kots-release-install-stable/k8s-app.yaml b/e2e/kots-release-install-stable/k8s-app.yaml new file mode 100644 index 000000000..c5f625eae --- /dev/null +++ b/e2e/kots-release-install-stable/k8s-app.yaml @@ -0,0 +1,10 @@ +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + name: "nginx" +spec: + descriptor: + links: + - description: Open App + # needs to match applicationUrl in kots-app.yaml + url: "http://nginx" diff --git a/e2e/kots-release-install-stable/kots-app.yaml b/e2e/kots-release-install-stable/kots-app.yaml new file mode 100644 index 000000000..2cb389bb8 --- /dev/null +++ b/e2e/kots-release-install-stable/kots-app.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: kots.io/v1beta1 +kind: Application +metadata: + name: nginx +spec: + title: Embedded Cluster Smoke Test Staging App + icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png + allowRollback: true + statusInformers: + - deployment/nginx + ports: + - serviceName: "nginx" + servicePort: 80 + localPort: 8888 + applicationUrl: "http://nginx" diff --git a/e2e/kots-release-install-stable/kots-lint-config.yaml b/e2e/kots-release-install-stable/kots-lint-config.yaml new file mode 100644 index 000000000..ba99c8b92 --- /dev/null +++ b/e2e/kots-release-install-stable/kots-lint-config.yaml @@ -0,0 +1,90 @@ +apiVersion: kots.io/v1beta1 +kind: LintConfig +metadata: + name: default-lint-config +spec: + rules: + - name: missing-kind-field + level: "error" + - name: missing-api-version-field + level: "error" + - name: preflight-spec + level: "warn" + - name: config-spec + level: "warn" + - name: troubleshoot-spec + level: "warn" + - name: application-spec + level: "warn" + - name: application-icon + level: "warn" + - name: application-statusInformers + level: "warn" + - name: invalid-target-kots-version + level: "error" + - name: invalid-min-kots-version + 
level: "error" + - name: invalid-kubernetes-installer + level: "error" + - name: deprecated-kubernetes-installer-version + level: "warn" + - name: duplicate-kots-kind + level: "error" + - name: invalid-helm-release-name + level: "error" + - name: duplicate-helm-release-name + level: "error" + - name: replicas-1 + level: "info" + - name: privileged + level: "info" + - name: allow-privilege-escalation + level: "info" + - name: container-image-latest-tag + level: "info" + - name: container-image-local-image-name + level: "error" + - name: container-resources + level: "info" + - name: container-resource-limits + level: "info" + - name: container-resource-requests + level: "info" + - name: resource-limits-cpu + level: "info" + - name: resource-limits-memory + level: "info" + - name: resource-requests-cpu + level: "info" + - name: resource-requests-memory + level: "info" + - name: volumes-host-paths + level: "info" + - name: volume-docker-sock + level: "info" + - name: hardcoded-namespace + level: "info" + - name: may-contain-secrets + level: "info" + - name: config-option-invalid-type + level: "error" + - name: repeat-option-missing-template + level: "error" + - name: repeat-option-missing-valuesByGroup + level: "error" + - name: repeat-option-malformed-yamlpath + level: "error" + - name: config-option-password-type + level: "warn" + - name: config-option-not-found + level: "warn" + - name: config-option-is-circular + level: "error" + - name: config-option-not-repeatable + level: "error" + - name: config-option-when-is-invalid + level: "error" + - name: config-option-invalid-regex-validator + level: "error" + - name: config-option-regex-validator-invalid-type + level: "error" diff --git a/e2e/kots-release-install-stable/preflight.yaml b/e2e/kots-release-install-stable/preflight.yaml new file mode 100644 index 000000000..1bfdf2ee3 --- /dev/null +++ b/e2e/kots-release-install-stable/preflight.yaml @@ -0,0 +1,25 @@ +apiVersion: troubleshoot.replicated.com/v1beta1 +kind: 
Preflight +metadata: + name: preflight-checks +spec: + collectors: + - clusterInfo: {} + - clusterResources: {} + analyzers: + - customResourceDefinition: + customResourceDefinitionName: installations.embeddedcluster.replicated.com + checkName: Embedded Cluster Installation CRD exists + outcomes: + - fail: + message: The Embedded Cluster Installation CRD is not installed in the cluster. Please install the Embedded Cluster Operator. + - pass: + message: The Embedded Cluster Installation CRD is installed in the cluster. + - customResourceDefinition: + customResourceDefinitionName: configs.embeddedcluster.replicated.com + checkName: Embedded Cluster Config CRD exists + outcomes: + - fail: + message: The Embedded Cluster Config CRD is not installed in the cluster. Please install the Embedded Cluster Operator. + - pass: + message: The Embedded Cluster Config CRD is installed in the cluster. diff --git a/e2e/kots-release-install-stable/troubleshoot.yaml b/e2e/kots-release-install-stable/troubleshoot.yaml new file mode 100644 index 000000000..e817502d6 --- /dev/null +++ b/e2e/kots-release-install-stable/troubleshoot.yaml @@ -0,0 +1,8 @@ +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: preflight-checks +spec: + collectors: + - clusterInfo: {} + - clusterResources: {} diff --git a/e2e/utils.go b/e2e/utils.go index 6f0d36288..af24f2ed5 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -71,6 +71,15 @@ func k8sVersionPrevious() string { return verParts[0] } +func k8sVersionPreviousStable() string { + // split the version string (like 'v1.29.6+k0s.0') into the k8s version and the k0s revision + verParts := strings.Split(os.Getenv("EXPECT_K0S_VERSION_PREVIOUS_STABLE"), "+") + if len(verParts) < 2 { + panic(fmt.Sprintf("failed to parse previous stable k8s version %q", os.Getenv("EXPECT_K0S_VERSION_PREVIOUS_STABLE"))) + } + return verParts[0] +} + func runInParallel(t *testing.T, fns ...func(t *testing.T) error) { runInParallelOffset(t, time.Duration(0), 
fns...) }