diff --git a/.github/ISSUE_TEMPLATE/create_release_branch.md b/.github/ISSUE_TEMPLATE/create_release_branch.md new file mode 100644 index 000000000..fe63cb483 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/create_release_branch.md @@ -0,0 +1,162 @@ +--- +name: "[Runbook] Create release branch" +about: Create a new branch for a new stable Kubernetes release +--- + +#### Summary + +Make sure to follow the steps below and ensure all actions are completed and signed off by one team member. + +#### Information + + +- **K8s version**: 1.xx + + +- **Owner**: + + +- **Reviewer**: + + +- **PR**: + +#### Actions + +The steps are to be followed in order; each task must be completed by the person specified in **bold**. Do not perform any steps unless all previous ones have been signed off. The **Reviewer** closes the issue once all steps are complete. + +- [ ] **Owner**: Add the owner and reviewer as assignees to the GitHub issue +- [ ] **Owner**: Ensure that you are part of the ["containers" team](https://launchpad.net/~containers) +- [ ] **Owner**: Request new `1.xx` Snapstore tracks for the snaps, similar to the previous [snapstore track-request][]. - #### Post template on https://discourse.charmhub.io/ **Title:** Request for 1.xx tracks for the k8s snap **Category:** store-requests **Body:** Hi, Could we please have tracks "1.xx-classic" and "1.xx" for the respective K8s snap releases? Thank you, $name +- [ ] **Owner**: Create `release-1.xx-strict` branch from latest `autoupdate/strict` - `git switch autoupdate/strict` - `git pull` - `git checkout -b release-1.xx-strict` - `git push origin release-1.xx-strict` +- [ ] **Owner**: Create `release-1.xx` branch from latest `main` - `git switch main` - `git pull` - `git checkout -b release-1.xx` - `git push origin release-1.xx` +- [ ] **Owner**: Create `release-1.xx` branch from latest `main` in k8s-dqlite - `git clone git@github.com:canonical/k8s-dqlite.git ~/tmp/release-1.xx` - `pushd ~/tmp/release-1.xx` - `git switch main` - `git pull` - `git checkout -b release-1.xx` - `git push origin release-1.xx` - `popd` - `rm -rf ~/tmp/release-1.xx` +- [ ] **Owner**: Create `release-1.xx` branch from latest `main` in cilium-rocks - `git clone git@github.com:canonical/cilium-rocks.git ~/tmp/release-1.xx` - `pushd ~/tmp/release-1.xx` - `git switch main` - `git pull` - `git checkout -b release-1.xx` - `git push origin release-1.xx` - `popd` - `rm -rf ~/tmp/release-1.xx` +- [ ] **Owner**: Create `release-1.xx` branch from latest `main` in coredns-rock - `git clone git@github.com:canonical/coredns-rock.git ~/tmp/release-1.xx` - `pushd ~/tmp/release-1.xx` - `git switch main` - `git pull` - `git checkout -b release-1.xx` - `git push origin release-1.xx` - `popd` - `rm -rf ~/tmp/release-1.xx` +- [ ] **Owner**: Create `release-1.xx` branch from latest `main` in metrics-server-rock - `git clone git@github.com:canonical/metrics-server-rock.git ~/tmp/release-1.xx` - `pushd ~/tmp/release-1.xx` - `git switch main` - `git pull` - `git checkout -b release-1.xx` - `git push origin release-1.xx` - `popd` - `rm -rf ~/tmp/release-1.xx` +- [ ] **Owner**: Create `release-1.xx` branch from latest `main` in rawfile-localpv - `git clone git@github.com:canonical/rawfile-localpv.git ~/tmp/release-1.xx` - `pushd ~/tmp/release-1.xx` - `git switch main` - `git pull` - `git checkout -b release-1.xx` - `git push origin release-1.xx` - `popd` - `rm -rf ~/tmp/release-1.xx` +- [ ]
**Reviewer**: Ensure `release-1.xx` branch is based on latest changes on `main` at the time of the release cut. +- [ ] **Reviewer**: Ensure `release-1.xx-strict` branch is based on latest changes on `autoupdate/strict` at the time of the release cut. +- [ ] **Owner**: Create PR to initialize `release-1.xx` branch: + - [ ] Update `KUBE_TRACK` to `1.xx` in [/build-scripts/components/kubernetes/version.sh][] + - [ ] Update `master` to `1.xx` in [/build-scripts/components/k8s-dqlite/version.sh][] + - [ ] Update `"main"` to `"release-1.xx"` in [/build-scripts/hack/generate-sbom.py][] + - [ ] `git commit -m 'Release 1.xx'` + - [ ] Create PR with the changes and request review from **Reviewer**. Make sure to update the issue `Information` section with a link to the PR. +- [ ] **Reviewer**: Review and merge PR to initialize branch. +- [ ] **Reviewer**: On merge, confirm [Auto-update strict branch] action runs to completion +- [ ] **Owner**: Create launchpad builders for `release-1.xx` + - [ ] Go to [lp:k8s][] and do **Import now** to pick up all latest changes. + - [ ] Under **Branches**, select `release-1.xx`, then **Create snap package** + - [ ] Set **Snap recipe name** to `k8s-snap-1.xx` + - [ ] Set **Owner** to `Canonical Kubernetes (containers)` + - [ ] Set **The project that this Snap is associated with** to `k8s` + - [ ] Set **Series** to Infer from snapcraft.yaml + - [ ] Set **Processors** to `AMD x86-64 (amd64)` and `ARM ARMv8 (arm64)` + - [ ] Enable **Automatically upload to store** + - [ ] Set **Registered store name** to `k8s` + - [ ] In **Store Channels**, set **Track** to `1.xx-classic` and **Risk** to `edge`. Leave **Branch** empty + - [ ] Click **Create snap package** at the bottom of the page. +- [ ] **Owner**: Create launchpad builders for `release-1.xx-strict` + - [ ] Return to [lp:k8s][]. + - [ ] Under **Branches**, select `release-1.xx-strict`, then **Create snap package** + - [ ] Set **Snap recipe name** to `k8s-snap-1.xx-strict` + - [ ] Set **Owner** to `Canonical Kubernetes (containers)` + - [ ] Set **The project that this Snap is associated with** to `k8s` + - [ ] Set **Series** to Infer from snapcraft.yaml + - [ ] Set **Processors** to `AMD x86-64 (amd64)` and `ARM ARMv8 (arm64)` + - [ ] Enable **Automatically upload to store** + - [ ] Set **Registered store name** to `k8s` + - [ ] In **Store Channels**, set **Track** to `1.xx` and **Risk** to `edge`. Leave **Branch** empty + - [ ] Click **Create snap package** at the bottom of the page. +- [ ] **Reviewer**: Ensure snap recipes are created in [lp:k8s/+snaps][] + - look for `k8s-snap-1.xx` + - look for `k8s-snap-1.xx-strict` + +#### After release + +- [ ] **Owner** follows up with the **Reviewer** and team about things to improve around the process. 
+- [ ] **Owner**: After a few weeks of stable CI, update the default track to `1.xx/stable`: - On the snap [releases page][], select `Track` > `1.xx` +- [ ] **Reviewer**: Ensure snap recipes are created in [lp:k8s/+snaps][] + + +[Auto-update strict branch]: https://github.com/canonical/k8s-snap/actions/workflows/strict.yaml +[snapstore track-request]: https://forum.snapcraft.io/t/tracks-request-for-k8s-snap/39122/2 +[releases page]: https://snapcraft.io/k8s/releases +[.github/workflows/cla.yaml]: ../workflows/cla.yaml +[.github/workflows/cron-jobs.yaml]: ../workflows/cron-jobs.yaml +[.github/workflows/go.yaml]: ../workflows/go.yaml +[.github/workflows/integration.yaml]: ../workflows/integration.yaml +[.github/workflows/python.yaml]: ../workflows/python.yaml +[.github/workflows/sbom.yaml]: ../workflows/sbom.yaml +[.github/workflows/strict-integration.yaml]: ../workflows/strict-integration.yaml +[.github/workflows/strict.yaml]: ../workflows/strict.yaml +[/build-scripts/components/kubernetes/version.sh]: ../../build-scripts/components/kubernetes/version.sh +[/build-scripts/components/k8s-dqlite/version.sh]: ../../build-scripts/components/k8s-dqlite/version.sh +[/build-scripts/hack/generate-sbom.py]: ../../build-scripts/hack/generate-sbom.py +[lp:k8s]: https://code.launchpad.net/~cdk8s/k8s/+git/k8s-snap +[lp:k8s/+snaps]: https://launchpad.net/k8s/+snaps diff --git a/.github/workflows/cla.yaml b/.github/workflows/cla.yaml index ada0569c7..7538a88a6 100644 --- a/.github/workflows/cla.yaml +++ b/.github/workflows/cla.yaml @@ -2,7 +2,9 @@ name: cla-check on: pull_request: - branches: [main] + branches: + - main + - 'release-[0-9]+.[0-9]+' jobs: cla-check: diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml index 6d9459cbc..251f41f06 100644 --- a/.github/workflows/go.yaml +++ b/.github/workflows/go.yaml @@ -2,7 +2,12 @@ name: Go on: push: - branches: [main, autoupdate/strict] + branches: + - main + - autoupdate/strict + - 'release-[0-9]+.[0-9]+' + - 'release-[0-9]+.[0-9]+-strict' + pull_request: jobs: diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index 26792cc60..7e7264b06 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -2,9 +2,15 @@ name: Integration Tests on: push: - branches: [main, autoupdate/strict] + branches: + - main + - autoupdate/strict + - 'release-[0-9]+.[0-9]+' + - 'release-[0-9]+.[0-9]+-strict' pull_request: - branches: [main] + branches: + - main + - 'release-[0-9]+.[0-9]+' jobs: build: diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 149076770..22329a7c3 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -2,9 +2,15 @@ name: Python on: push: - branches: [main, autoupdate/strict] + branches: + - main + - autoupdate/strict + - 'release-[0-9]+.[0-9]+' + - 'release-[0-9]+.[0-9]+-strict' pull_request: - branches: [main] + branches: + - main + - 'release-[0-9]+.[0-9]+' jobs: lint: diff --git a/.github/workflows/sbom.yaml b/.github/workflows/sbom.yaml index 573575302..316f55b32 100644 --- a/.github/workflows/sbom.yaml +++ b/.github/workflows/sbom.yaml @@ -2,9 +2,15 @@ name: SBOM on: push: - branches: [main, autoupdate/strict] + branches: + - main + - autoupdate/strict + - 'release-[0-9]+.[0-9]+' + - 'release-[0-9]+.[0-9]+-strict' pull_request: - branches: [main] + branches: + - main + - 'release-[0-9]+.[0-9]+' jobs: build: diff --git a/.github/workflows/scorecard.yaml b/.github/workflows/scorecard.yaml new file mode 100644 index
000000000..f3a8eb6d7 --- /dev/null +++ b/.github/workflows/scorecard.yaml @@ -0,0 +1,73 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '43 6 * * *' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@97a0fba1372883ab732affbe8f94b823f91727db # v3.pre.node20 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard (optional). 
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 + with: + sarif_file: results.sarif diff --git a/.github/workflows/strict-integration.yaml b/.github/workflows/strict-integration.yaml index 5a490257a..5985ab93d 100644 --- a/.github/workflows/strict-integration.yaml +++ b/.github/workflows/strict-integration.yaml @@ -2,15 +2,40 @@ name: Integration Tests (Strict) on: push: - branches: [main] + branches: + - main + - 'release-[0-9]+.[0-9]+' pull_request: - branches: [main] + branches: + - main + - 'release-[0-9]+.[0-9]+' jobs: + prepare: + name: Prepare + runs-on: ubuntu-latest + outputs: + strict: ${{ steps.determine.outputs.strict }} + steps: + - name: Determine strict branch + id: determine + env: + BRANCH: ${{ github.base_ref || github.ref }} + run: | + BRANCH=${BRANCH#refs/heads/} # strip off refs/heads/ if it exists + if [[ "${BRANCH}" == "main" ]]; then + echo "strict=autoupdate/strict" >> "$GITHUB_OUTPUT" + elif [[ "${BRANCH}" =~ ^release-[0-9]+\.[0-9]+$ ]]; then + echo "strict=${BRANCH}-strict" >> "$GITHUB_OUTPUT" + else + echo "Failed to determine matching strict branch for ${BRANCH}" + echo "strict=" >> "$GITHUB_OUTPUT" + fi build: name: Build runs-on: ubuntu-20.04 - + needs: [ prepare ] + if: ${{ needs.prepare.outputs.strict }} steps: - name: Checking out repo uses: actions/checkout@v4 @@ -24,7 +49,7 @@ jobs: sudo snap install snapcraft --classic - name: Apply strict patch run: | - git checkout -b autoupdate/strict + git checkout -b ${{ needs.prepare.outputs.strict }} git config --global user.email k8s-bot@canonical.com git config --global user.name k8s-bot git am ./build-scripts/patches/strict/*.patch @@ -39,13 +64,13 @@ jobs: path: k8s-strict.snap test-integration: + needs: [ prepare, build ] + if: ${{ needs.prepare.outputs.strict }} name: Test ${{ matrix.os }} strategy: matrix: os: ["ubuntu:20.04"] runs-on: ubuntu-20.04 - needs: build - steps: - name: Check out code uses: actions/checkout@v4 diff --git a/.github/workflows/strict.yaml b/.github/workflows/strict.yaml index a9a1c33cd..bbce357c2 100644 --- a/.github/workflows/strict.yaml +++ b/.github/workflows/strict.yaml @@ -2,22 +2,46 @@ name: Auto-update strict branch on: push: - branches: [main] + branches: + - main + - 'release-[0-9]+.[0-9]+' jobs: + prepare: + name: Prepare + runs-on: ubuntu-latest + outputs: + strict: ${{ steps.determine.outputs.strict }} + steps: + - name: Determine strict branch + id: determine + env: + BRANCH: ${{ github.ref }} + run: | + BRANCH=${BRANCH#refs/heads/} # strip off refs/heads/ if it exists + if [[ "${BRANCH}" == "main" ]]; then + echo "strict=autoupdate/strict" >> "$GITHUB_OUTPUT" + elif [[ "${BRANCH}" =~ ^release-[0-9]+\.[0-9]+$ ]]; then + echo "strict=${BRANCH}-strict" >> "$GITHUB_OUTPUT" + else + echo "Failed to determine matching strict branch for ${BRANCH}" + echo "strict=" >> "$GITHUB_OUTPUT" + fi update: runs-on: ubuntu-20.04 + needs: [ prepare ] + if: ${{ needs.prepare.outputs.strict }} steps: - - name: Checking out repo + - name: Sync ${{ github.ref }} to ${{ needs.prepare.outputs.strict }} uses: actions/checkout@v4 with: ssh-key: ${{ secrets.DEPLOY_KEY_TO_UPDATE_STRICT_BRANCH }} - name: Apply strict patch run: | - git checkout -b autoupdate/strict + git checkout -b ${{ needs.prepare.outputs.strict }} git config --global user.email k8s-bot@canonical.com git config --global user.name k8s-bot git am
./build-scripts/patches/strict/*.patch - - name: Push to autoupdate/strict branch + - name: Push to ${{ needs.prepare.outputs.strict }} run: | - git push origin --force autoupdate/strict + git push origin --force ${{ needs.prepare.outputs.strict }} diff --git a/README.md b/README.md index 093871e66..efc1e8ff2 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,11 @@ # Canonical Kubernetes Snap [![End to End Tests](https://github.com/canonical/k8s-snap/actions/workflows/integration.yaml/badge.svg)](https://github.com/canonical/k8s-snap/actions/workflows/integration.yaml) +![](https://img.shields.io/badge/Kubernetes-1.30-326de6.svg) + [![Get it from the Snap Store](https://snapcraft.io/static/images/badges/en/snap-store-black.svg)](https://snapcraft.io/k8s) + + **Canonical Kubernetes** is the fastest, easiest way to deploy a fully-conformant Kubernetes cluster. Harnessing pure upstream Kubernetes, this distribution adds the missing pieces (e.g. ingress, dns, networking) for a zero-ops experience. For more information and instructions, please see the official documentation at: https://ubuntu.com/kubernetes diff --git a/build-scripts/hack/generate-sbom.py b/build-scripts/hack/generate-sbom.py index 9ea4128e7..bc4818d2c 100755 --- a/build-scripts/hack/generate-sbom.py +++ b/build-scripts/hack/generate-sbom.py @@ -39,7 +39,14 @@ RAWFILE_LOCALPV_REPO = "https://github.com/canonical/rawfile-localpv" RAWFILE_LOCALPV_TAG = "main" SNAPCRAFT_C_COMPONENTS = ["libmnl", "libnftnl", "iptables"] -SNAPCRAFT_GO_COMPONENTS = ["runc", "containerd", "cni", "helm", "kubernetes", "k8s-dqlite"] +SNAPCRAFT_GO_COMPONENTS = [ + "runc", + "containerd", + "cni", + "helm", + "kubernetes", + "k8s-dqlite", +] K8S_DIR = DIR / "../../src/k8s" @@ -166,6 +173,7 @@ def rock_cilium(manifest, extra_files): LOG.info("Generating SBOM info for Cilium rocks") with _git_repo(CILIUM_ROCK_REPO, CILIUM_ROCK_TAG) as d: + rock_repo_commit = _parse_output(["git", "rev-parse", "HEAD"], cwd=d) rockcraft = (d / "cilium/rockcraft.yaml").read_text() operator_rockcraft = (d / "cilium-operator-generic/rockcraft.yaml").read_text() @@ -186,8 +194,21 @@ def rock_cilium(manifest, extra_files): # NOTE: this silently assumes that cilium and cilium-operator-generic rocks are in sync manifest["rocks"]["cilium"] = { + "rock-source": { + "type": "git", + "repo": CILIUM_ROCK_REPO, + "tag": CILIUM_ROCK_TAG, + "revision": rock_repo_commit, + }, "language": "go", - "details": ["cilium/rockcraft.yaml", "cilium-operator-generic/rockcraft.yaml"], + "details": [ + "cilium/rockcraft.yaml", + "cilium/go.mod", + "cilium/go.sum", + "cilium-operator-generic/rockcraft.yaml", + "cilium-operator-generic/go.mod", + "cilium-operator-generic/go.sum", + ], "source": { "type": "git", "repo": repo_url, @@ -201,6 +222,7 @@ def rock_coredns(manifest, extra_files): LOG.info("Generating SBOM info for CoreDNS rock") with _git_repo(COREDNS_ROCK_REPO, COREDNS_ROCK_TAG) as d: + rock_repo_commit = _parse_output(["git", "rev-parse", "HEAD"], cwd=d) rockcraft = (d / "rockcraft.yaml").read_text() extra_files["coredns/rockcraft.yaml"] = rockcraft @@ -215,6 +237,12 @@ def rock_coredns(manifest, extra_files): extra_files["coredns/go.sum"] = _read_file(dir / "go.sum") manifest["rocks"]["coredns"] = { + "rock-source": { + "type": "git", + "repo": COREDNS_ROCK_REPO, + "tag": COREDNS_ROCK_TAG, + "revision": rock_repo_commit, + }, "language": "go", "details": ["coredns/rockcraft.yaml", "coredns/go.mod", "coredns/go.sum"], "source": { @@ -230,6 +258,7 @@ def rock_metrics_server(manifest, 
extra_files): LOG.info("Generating SBOM info for metrics-server rock") with _git_repo(METRICS_SERVER_ROCK_REPO, METRICS_SERVER_ROCK_TAG) as d: + rock_repo_commit = _parse_output(["git", "rev-parse", "HEAD"], cwd=d) rockcraft = (d / "rockcraft.yaml").read_text() extra_files["metrics-server/rockcraft.yaml"] = rockcraft @@ -244,6 +273,12 @@ def rock_metrics_server(manifest, extra_files): extra_files["metrics-server/go.sum"] = _read_file(dir / "go.sum") manifest["rocks"]["metrics-server"] = { + "rock-source": { + "type": "git", + "repo": METRICS_SERVER_ROCK_REPO, + "tag": METRICS_SERVER_ROCK_TAG, + "revision": rock_repo_commit, + }, "language": "go", "details": [ "metrics-server/rockcraft.yaml", @@ -275,6 +310,12 @@ def rock_rawfile_localpv(manifest, extra_files): extra_files["rawfile-localpv/requirements.txt"] = requirements manifest["rocks"]["rawfile-localpv"] = { + "rock-source": { + "type": "git", + "repo": repo_url, + "tag": repo_tag, + "revision": repo_commit, + }, "language": "python", "details": [ "rawfile-localpv/rockcraft.yaml", diff --git a/build-scripts/hack/update-gateway-api-chart.sh b/build-scripts/hack/update-gateway-api-chart.sh index bdcdc0d9b..eb2b9a91b 100755 --- a/build-scripts/hack/update-gateway-api-chart.sh +++ b/build-scripts/hack/update-gateway-api-chart.sh @@ -1,6 +1,6 @@ #!/bin/bash -VERSION="v0.7.1" +VERSION="v1.0.0" DIR=`realpath $(dirname "${0}")` CHARTS_PATH="$DIR/../../k8s/components/charts" @@ -16,6 +16,7 @@ rm -rf gateway-api/templates/* rm -rf gateway-api/charts cp gateway-api-src/config/crd/standard/* gateway-api/templates/ cp gateway-api-src/config/crd/experimental/gateway.networking.k8s.io_tlsroutes.yaml gateway-api/templates/ +cp gateway-api-src/config/crd/experimental/gateway.networking.k8s.io_grpcroutes.yaml gateway-api/templates/ sed -i 's/^\(version: \).*$/\1'"${VERSION:1}"'/' gateway-api/Chart.yaml sed -i 's/^\(appVersion: \).*$/\1'"${VERSION:1}"'/' gateway-api/Chart.yaml sed -i 's/^\(description: \).*$/\1'"A Helm Chart containing Gateway API CRDs"'/' gateway-api/Chart.yaml diff --git a/docs/src/_parts/commands/k8s.md b/docs/src/_parts/commands/k8s.md index cf6bdf7b9..618e28f91 100644 --- a/docs/src/_parts/commands/k8s.md +++ b/docs/src/_parts/commands/k8s.md @@ -6,8 +6,8 @@ Canonical Kubernetes CLI ``` -h, --help help for k8s - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_bootstrap.md b/docs/src/_parts/commands/k8s_bootstrap.md index f9573f1d6..562e131e4 100644 --- a/docs/src/_parts/commands/k8s_bootstrap.md +++ b/docs/src/_parts/commands/k8s_bootstrap.md @@ -14,7 +14,7 @@ k8s bootstrap [flags] ``` --address string microcluster address, defaults to the node IP address - --config string path to the YAML file containing your custom cluster bootstrap configuration. + --file string path to the YAML file containing your custom cluster bootstrap configuration. 
-h, --help help for bootstrap --interactive interactively configure the most important cluster options --name string node name, defaults to hostname @@ -23,8 +23,8 @@ k8s bootstrap [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_completion.md b/docs/src/_parts/commands/k8s_completion.md index b9c1ae843..16d363b92 100644 --- a/docs/src/_parts/commands/k8s_completion.md +++ b/docs/src/_parts/commands/k8s_completion.md @@ -17,8 +17,8 @@ See each sub-command's help for details on how to use the generated script. ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_completion_bash.md b/docs/src/_parts/commands/k8s_completion_bash.md index 6be9f618f..7090a558d 100644 --- a/docs/src/_parts/commands/k8s_completion_bash.md +++ b/docs/src/_parts/commands/k8s_completion_bash.md @@ -40,8 +40,8 @@ k8s completion bash ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_completion_fish.md b/docs/src/_parts/commands/k8s_completion_fish.md index fcbd21e1e..9d244e63b 100644 --- a/docs/src/_parts/commands/k8s_completion_fish.md +++ b/docs/src/_parts/commands/k8s_completion_fish.md @@ -31,8 +31,8 @@ k8s completion fish [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_completion_powershell.md b/docs/src/_parts/commands/k8s_completion_powershell.md index 7a8a06801..7904604ab 100644 --- a/docs/src/_parts/commands/k8s_completion_powershell.md +++ b/docs/src/_parts/commands/k8s_completion_powershell.md @@ -28,8 +28,8 @@ k8s completion powershell [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to 
execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_completion_zsh.md b/docs/src/_parts/commands/k8s_completion_zsh.md index e4aef6386..586c3d637 100644 --- a/docs/src/_parts/commands/k8s_completion_zsh.md +++ b/docs/src/_parts/commands/k8s_completion_zsh.md @@ -42,8 +42,8 @@ k8s completion zsh [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_config.md b/docs/src/_parts/commands/k8s_config.md index 8fda5e5b9..8dd685df9 100644 --- a/docs/src/_parts/commands/k8s_config.md +++ b/docs/src/_parts/commands/k8s_config.md @@ -16,8 +16,8 @@ k8s config [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_disable.md b/docs/src/_parts/commands/k8s_disable.md index 914d5428c..6f305cc97 100644 --- a/docs/src/_parts/commands/k8s_disable.md +++ b/docs/src/_parts/commands/k8s_disable.md @@ -19,8 +19,8 @@ k8s disable ... [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_enable.md b/docs/src/_parts/commands/k8s_enable.md index edf9e2243..204f078e8 100644 --- a/docs/src/_parts/commands/k8s_enable.md +++ b/docs/src/_parts/commands/k8s_enable.md @@ -19,8 +19,8 @@ k8s enable ... 
[flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_get-join-token.md b/docs/src/_parts/commands/k8s_get-join-token.md index e5e0be461..706fa5879 100644 --- a/docs/src/_parts/commands/k8s_get-join-token.md +++ b/docs/src/_parts/commands/k8s_get-join-token.md @@ -16,8 +16,8 @@ k8s get-join-token [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_get.md b/docs/src/_parts/commands/k8s_get.md index 70a945025..2cd8cbe94 100644 --- a/docs/src/_parts/commands/k8s_get.md +++ b/docs/src/_parts/commands/k8s_get.md @@ -19,8 +19,8 @@ k8s get [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_join-cluster.md b/docs/src/_parts/commands/k8s_join-cluster.md index 8362631d0..ae8b918cc 100644 --- a/docs/src/_parts/commands/k8s_join-cluster.md +++ b/docs/src/_parts/commands/k8s_join-cluster.md @@ -17,8 +17,8 @@ k8s join-cluster [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_kubectl.md b/docs/src/_parts/commands/k8s_kubectl.md index 1119ef7a1..131470387 100644 --- a/docs/src/_parts/commands/k8s_kubectl.md +++ b/docs/src/_parts/commands/k8s_kubectl.md @@ -15,8 +15,8 @@ k8s kubectl [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_remove-node.md b/docs/src/_parts/commands/k8s_remove-node.md index 0fb7e111d..0e8b7fb5e 100644 --- a/docs/src/_parts/commands/k8s_remove-node.md +++ b/docs/src/_parts/commands/k8s_remove-node.md @@ -16,8 +16,8 @@ k8s remove-node [flags] ### Options inherited from 
parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_set.md b/docs/src/_parts/commands/k8s_set.md index 6f89093f5..9a5836ca9 100644 --- a/docs/src/_parts/commands/k8s_set.md +++ b/docs/src/_parts/commands/k8s_set.md @@ -20,8 +20,8 @@ k8s set ... [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/_parts/commands/k8s_status.md b/docs/src/_parts/commands/k8s_status.md index fbe07e029..d34d669e4 100644 --- a/docs/src/_parts/commands/k8s_status.md +++ b/docs/src/_parts/commands/k8s_status.md @@ -16,8 +16,8 @@ k8s status [flags] ### Options inherited from parent commands ``` - -o, --output-format string set the output format to one of plain, json or yaml (default "plain") - -t, --timeout duration the max time to wait for the command to execute (default 1m30s) + --output-format string set the output format to one of plain, json or yaml (default "plain") + --timeout duration the max time to wait for the command to execute (default 1m30s) ``` ### SEE ALSO diff --git a/docs/src/assets/arch.dsl b/docs/src/assets/arch.dsl index 97ec4aa45..7bdd1e9ef 100644 --- a/docs/src/assets/arch.dsl +++ b/docs/src/assets/arch.dsl @@ -3,14 +3,14 @@ workspace "Canonical K8s Workspace" { model { - admin = person "K8s Admin" "Responsible for the K8s cluster, has elevated permissions" - user = person "K8s User" "Interact with the workloads hosted in K8s" + admin = person "K8s Admin" + user = person "K8s User" charm = softwareSystem "Charm K8s" "Orchestrating the lifecycle management of K8s" - external_lb = softwareSystem "Load Balancer" "External LB, offered by the substrate (cloud)" "Extern" - storage = softwareSystem "Storage" "External storage, offered by the substrate (cloud)" "Extern" - iam = softwareSystem "Identity management system" "External identity system, offered by the substrate (cloud)" "Extern" - external_datastore = softwareSystem "External datastore" "postgress or etcd" "Extern" + external_lb = softwareSystem "Load Balancer" "External LB, offered by the substrate (cloud). Could be replaced by any alternative solution." "Extern" + storage = softwareSystem "Storage" "External storage, offered by the substrate (cloud). Could be replaced by any storage solution." "Extern" + iam = softwareSystem "Identity Management System" "The external identity system, offered by the substrate (cloud). Could be replaced by any alternative system." 
"Extern" + external_datastore = softwareSystem "External datastore" "etcd" "Extern" k8s_snap = softwareSystem "K8s Snap Distribution" "The Kubernetes distribution in a snap" { @@ -47,6 +47,7 @@ workspace "Canonical K8s Workspace" { } admin -> cli "Administers the cluster" + admin -> charm "Manages cluster's lifecycle" admin -> kubectl "Uses to manage the cluster" user -> loadbalancer "Interact with workloads hosted in K8s" charm -> api "Orchestrates the lifecycle management of K8s" @@ -54,8 +55,8 @@ workspace "Canonical K8s Workspace" { k8s_snap -> storage "Hosted workloads use storage" k8s_snap -> iam "Users identity is retrieved" - k8s_dqlite -> external_datastore "May be replaced by" "Any" "Runtime" - loadbalancer -> external_lb "May be replaced by" "Any" "Runtime" + k8s_dqlite -> external_datastore "Stores cluster data" "" "Runtime" + loadbalancer -> external_lb "Routes client requests" "" "Runtime" cluster_manager -> systemd "Configures" @@ -170,4 +171,4 @@ workspace "Canonical K8s Workspace" { } } -} \ No newline at end of file +} diff --git a/docs/src/assets/charms-architecture.puml b/docs/src/assets/charms-architecture.puml new file mode 100644 index 000000000..372518235 --- /dev/null +++ b/docs/src/assets/charms-architecture.puml @@ -0,0 +1,36 @@ +@startuml +set separator none +title Juju - Containers + +top to bottom direction + +!include +!include +!include + +Person(Administrator, "Administrator", $descr="", $tags="", $link="") + +System_Boundary("Juju_boundary", "Juju", $tags="") { + Container(Juju.JujuController, "Juju Controller", $techn="Snap Package", $descr="", $tags="", $link="") + Container(Juju.JujuClient, "Juju Client", $techn="Snap Package", $descr="", $tags="", $link="") + Container(Juju.CompatibleCharms, "Compatible Charms", $techn="", $descr="Other Compatible Canonical Charms", $tags="", $link="") + Container(Juju.K8s, "K8s", $techn="Charmed Operator", $descr="K8s Charm", $tags="", $link="") + Container(Juju.K8sRelationData, "K8s Relation Data", $techn="", $descr="", $tags="", $link="") + Container(Juju.K8sWorker, "K8s Worker", $techn="Charmed Operator", $descr="K8s Worker Charm", $tags="", $link="") + Container(Juju.K8sWorkerRelationData, "K8s Worker Relation Data", $techn="Juju Relation Databag", $descr="", $tags="", $link="") +} + +Rel(Juju.K8sWorker, Juju.K8sWorkerRelationData, "Reads from and writes to", $techn="", $tags="", $link="") +Rel(Juju.K8sWorkerRelationData, Juju.K8sWorker, "Retrieves Peer Data", $techn="", $tags="", $link="") +Rel(Juju.JujuController, Juju.K8s, "Manages", $techn="", $tags="", $link="") +Rel(Juju.JujuController, Juju.K8sWorker, "Manages", $techn="", $tags="", $link="") +Rel(Administrator, Juju.JujuClient, "Uses", $techn="", $tags="", $link="") +Rel(Juju.JujuClient, Juju.JujuController, "Manages", $techn="", $tags="", $link="") +Rel(Juju.K8s, Juju.CompatibleCharms, "Integrates with", $techn="", $tags="", $link="") +Rel(Juju.K8sWorker, Juju.CompatibleCharms, "Integrates with", $techn="", $tags="", $link="") +Rel(Juju.K8s, Juju.K8sRelationData, "Reads from and writes to", $techn="", $tags="", $link="") +Rel(Juju.K8sRelationData, Juju.K8s, "Retrieves Peer Data", $techn="", $tags="", $link="") +Rel(Juju.K8s, Juju.K8sWorkerRelationData, "Share Cluster Data", $techn="", $tags="", $link="") + +SHOW_LEGEND(true) +@enduml diff --git a/docs/src/assets/overview.puml b/docs/src/assets/overview.puml index 120bfb12e..4e838b56c 100644 --- a/docs/src/assets/overview.puml +++ b/docs/src/assets/overview.puml @@ -7,22 +7,23 @@ top to bottom direction 
!include !include -Person(K8sAdmin, "K8s Admin", $descr="Responsible for the K8s cluster, has elevated permissions", $tags="", $link="") -Person(K8sUser, "K8s User", $descr="Interact with the workloads hosted in K8s", $tags="", $link="") -System(CharmK8s, "Charm K8s", $descr="Orchestrating the lifecycle management of K8s", $tags="", $link="") -System(LoadBalancer, "Load Balancer", $descr="External LB, offered by the substrate (cloud)", $tags="", $link="") -System(Storage, "Storage", $descr="External storage, offered by the substrate (cloud)", $tags="", $link="") -System(Identitymanagementsystem, "Identity management system", $descr="External identity system, offered by the substrate (cloud)", $tags="", $link="") -System(Externaldatastore, "External datastore", $descr="postgress or etcd", $tags="", $link="") +Person(K8sAdmin, "K8s Admin", $descr="", $tags="", $link="") +Person(K8sUser, "K8s User", $descr="", $tags="", $link="") +System(CharmK8s, "Charm K8s", $descr="", $tags="", $link="") +System(LoadBalancer, "Load Balancer", $descr="External LB, offered by the substrate (cloud). Could be replaced by any alternative solution.", $tags="", $link="") +System(Storage, "Storage", $descr="External storage, offered by the substrate (cloud). Could be replaced by any storage solution.", $tags="", $link="") +System(IdentityManagementSystem, "Identity Management System", $descr="The external identity system, offered by the substrate (cloud). Could be replaced by any alternative system.", $tags="", $link="") +System(Externaldatastore, "External datastore", $descr="etcd", $tags="", $link="") System(K8sSnapDistribution, "K8s Snap Distribution", $descr="The Kubernetes distribution in a snap", $tags="", $link="") -Rel(K8sAdmin, K8sSnapDistribution, "Sets up and configured the cluster", $techn="", $tags="", $link="") +Rel(K8sAdmin, K8sSnapDistribution, "Sets up and configures the cluster", $techn="", $tags="", $link="") +Rel(K8sAdmin, CharmK8s, "Manages cluster's lifecycle", $techn="", $tags="", $link="") Rel(K8sUser, K8sSnapDistribution, "Interacts with workloads hosted in K8s", $techn="", $tags="", $link="") Rel(CharmK8s, K8sSnapDistribution, "Orchestrates the lifecycle management of K8s", $techn="", $tags="", $link="") Rel(K8sSnapDistribution, Storage, "Hosted workloads use storage", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution, Identitymanagementsystem, "Users identity is retrieved", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution, Externaldatastore, "May be replaced by", $techn="Any", $tags="", $link="") -Rel(K8sSnapDistribution, LoadBalancer, "May be replaced by", $techn="Any", $tags="", $link="") +Rel(K8sSnapDistribution, IdentityManagementSystem, "Retrieves users identity", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution, Externaldatastore, "Stores cluster data", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution, LoadBalancer, "Routes client requests", $techn="", $tags="", $link="") SHOW_LEGEND(true) -@enduml \ No newline at end of file +@enduml diff --git a/docs/src/reference/architecture.md b/docs/src/reference/architecture.md index a2ca71754..3d494c57c 100644 --- a/docs/src/reference/architecture.md +++ b/docs/src/reference/architecture.md @@ -102,6 +102,43 @@ This functionality is exposed via the following interfaces: - The **API**: The API over HTTP serves the CLI and is also used to programmatically drive the Kubernetes cluster. 
+## Canonical K8s charms + +Canonical `k8s` charms encompass two primary components: the [`k8s` charm][K8s charm] and the [`k8s-worker` charm][K8s-worker charm]. + +```{kroki} ../assets/charms-architecture.puml +``` + +Charms are instantiated on a machine as a Juju unit, and a collection of units constitutes an application. Both `k8s` and `k8s-worker` units are responsible for installing and managing their machine's `k8s` snap; however, the charm type determines the node's role in the Kubernetes cluster. The `k8s` charm manages `control-plane` nodes, whereas the `k8s-worker` charm manages Kubernetes `worker` nodes. The administrator manages the cluster via the `juju` client, directing the `juju` controller to reach the model's eventually consistent state. For more detail on Juju's concepts, see the [Juju docs][]. + +The administrator may choose any supported cloud type (OpenStack, MAAS, AWS, GCP, Azure, ...) on which to manage the machines making up the Kubernetes cluster. Juju selects a single leader unit per application to act as a centralised figure within the model. The `k8s` leader oversees Kubernetes bootstrapping and the enlistment of new nodes. Follower `k8s` units join the cluster using secrets shared through relation data from the leader. The entire lifecycle of the deployment is orchestrated by the `k8s` charm, with tokens and cluster-related information being exchanged through Juju relation data. + +Furthermore, the `k8s-worker` unit functions exclusively as a worker within the cluster, establishing a relation with the `k8s` leader unit and requesting tokens and cluster-related information through relation data. The `k8s` leader is responsible for issuing these tokens and revoking them if a unit administratively departs the cluster. + +The `k8s` charm also supports integration with other compatible charms, enabling features such as connectivity with an external `etcd` datastore and the sharing of observability data with the [Canonical Observability Stack (COS)][COS docs]. This modular and integrated approach facilitates a robust and flexible Canonical Kubernetes deployment managed through Juju. + + -[C4 model]: https://c4model.com/ -[K8s charm]: https://charmhub.io/k8s +[C4 model]: https://c4model.com/ +[K8s charm]: https://charmhub.io/k8s +[K8s-Worker charm]: https://charmhub.io/k8s-worker +[Juju docs]: https://juju.is/docs/juju +[COS docs]: https://ubuntu.com/observability \ No newline at end of file diff --git a/k8s/args/k8sd b/k8s/args/k8sd index 7ad435cb9..fbdf3c83a 100644 --- a/k8s/args/k8sd +++ b/k8s/args/k8sd @@ -1,2 +1 @@ ---port=6400 --state-dir=${SNAP_COMMON}/var/lib/k8sd/state diff --git a/k8s/components/charts/cilium-1.14.1.tgz b/k8s/components/charts/cilium-1.14.1.tgz deleted file mode 100644 index 719e27caa..000000000 Binary files a/k8s/components/charts/cilium-1.14.1.tgz and /dev/null differ diff --git a/k8s/components/charts/cilium-1.15.2.tgz b/k8s/components/charts/cilium-1.15.2.tgz new file mode 100644 index 000000000..6bf08bd03 Binary files /dev/null and b/k8s/components/charts/cilium-1.15.2.tgz differ diff --git a/k8s/components/charts/ck-loadbalancer/Chart.yaml b/k8s/components/charts/ck-loadbalancer/Chart.yaml index 62c410230..003115cc7 100644 --- a/k8s/components/charts/ck-loadbalancer/Chart.yaml +++ b/k8s/components/charts/ck-loadbalancer/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version.
This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.1.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.1.0" +appVersion: "0.1.1" diff --git a/k8s/components/charts/ck-loadbalancer/templates/lb-ip-pool.yaml b/k8s/components/charts/ck-loadbalancer/templates/lb-ip-pool.yaml index 7f2bc9d12..044512b55 100644 --- a/k8s/components/charts/ck-loadbalancer/templates/lb-ip-pool.yaml +++ b/k8s/components/charts/ck-loadbalancer/templates/lb-ip-pool.yaml @@ -7,7 +7,7 @@ metadata: {{- include "ck-loadbalancer.labels" . | nindent 4 }} spec: {{- with .Values.ipPool.cidrs }} - cidrs: + blocks: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} diff --git a/k8s/components/charts/gateway-api-0.7.1.tgz b/k8s/components/charts/gateway-api-0.7.1.tgz deleted file mode 100644 index 1a17be725..000000000 Binary files a/k8s/components/charts/gateway-api-0.7.1.tgz and /dev/null differ diff --git a/k8s/components/charts/gateway-api-1.0.0.tgz b/k8s/components/charts/gateway-api-1.0.0.tgz new file mode 100644 index 000000000..7b84d44a5 Binary files /dev/null and b/k8s/components/charts/gateway-api-1.0.0.tgz differ diff --git a/k8s/lib.sh b/k8s/lib.sh index fd1bdef90..22c4b0cfa 100755 --- a/k8s/lib.sh +++ b/k8s/lib.sh @@ -69,6 +69,11 @@ k8s::remove::network() { do tc filter del dev $default_interface ${d} || true done + + rm -rf /var/run/cilium/cilium.pid + if [ -f /opt/cni/bin/cilium-dbg ]; then + /opt/cni/bin/cilium-dbg cleanup --all-state --force || true + fi } # [DANGER] Cleanup containers and runtime state. Note that the order of operations below is crucial. diff --git a/src/k8s/api/v1/bootstrap_config.go b/src/k8s/api/v1/bootstrap_config.go new file mode 100644 index 000000000..7c5a18ffd --- /dev/null +++ b/src/k8s/api/v1/bootstrap_config.go @@ -0,0 +1,55 @@ +package v1 + +import ( + "encoding/json" + "fmt" +) + +// BootstrapConfig is used to seed cluster configuration when bootstrapping a new cluster. +type BootstrapConfig struct { + // ClusterConfig + ClusterConfig UserFacingClusterConfig `json:"cluster-config,omitempty" yaml:"cluster-config,omitempty"` + + // Seed configuration for the control plane (flat on purpose). 
Empty values are ignored + PodCIDR *string `json:"pod-cidr,omitempty" yaml:"pod-cidr,omitempty"` + ServiceCIDR *string `json:"service-cidr,omitempty" yaml:"service-cidr,omitempty"` + DisableRBAC *bool `json:"disable-rbac,omitempty" yaml:"disable-rbac,omitempty"` + SecurePort *int `json:"secure-port,omitempty" yaml:"secure-port,omitempty"` + CloudProvider *string `json:"cloud-provider,omitempty" yaml:"cloud-provider,omitempty"` + K8sDqlitePort *int `json:"k8s-dqlite-port,omitempty" yaml:"k8s-dqlite-port,omitempty"` + DatastoreType *string `json:"datastore-type,omitempty" yaml:"datastore-type,omitempty"` + DatastoreServers []string `json:"datastore-servers,omitempty" yaml:"datastore-servers,omitempty"` + DatastoreCACert *string `json:"datastore-ca-crt,omitempty" yaml:"datastore-ca-crt,omitempty"` + DatastoreClientCert *string `json:"datastore-client-crt,omitempty" yaml:"datastore-client-crt,omitempty"` + DatastoreClientKey *string `json:"datastore-client-key,omitempty" yaml:"datastore-client-key,omitempty"` + + // Seed configuration for certificates + ExtraSANs []string `json:"extra-sans,omitempty" yaml:"extra-sans,omitempty"` +} + +func (b *BootstrapConfig) GetDatastoreType() string { return getField(b.DatastoreType) } +func (b *BootstrapConfig) GetDatastoreCACert() string { return getField(b.DatastoreCACert) } +func (b *BootstrapConfig) GetDatastoreClientCert() string { return getField(b.DatastoreClientCert) } +func (b *BootstrapConfig) GetDatastoreClientKey() string { return getField(b.DatastoreClientKey) } +func (b *BootstrapConfig) GetK8sDqlitePort() int { return getField(b.K8sDqlitePort) } + +// ToMicrocluster converts a BootstrapConfig to a map[string]string for use in microcluster. +func (b *BootstrapConfig) ToMicrocluster() (map[string]string, error) { + config, err := json.Marshal(b) + if err != nil { + return nil, fmt.Errorf("failed to marshal bootstrap config: %w", err) + } + + return map[string]string{ + "bootstrapConfig": string(config), + }, nil +} + +// BootstrapConfigFromMicrocluster parses a microcluster map[string]string and retrieves the BootstrapConfig. +func BootstrapConfigFromMicrocluster(m map[string]string) (BootstrapConfig, error) { + config := BootstrapConfig{} + if err := json.Unmarshal([]byte(m["bootstrapConfig"]), &config); err != nil { + return BootstrapConfig{}, fmt.Errorf("failed to unmarshal bootstrap config: %w", err) + } + return config, nil +} diff --git a/src/k8s/api/v1/bootstrap_config_test.go b/src/k8s/api/v1/bootstrap_config_test.go new file mode 100644 index 000000000..5afbc2c8b --- /dev/null +++ b/src/k8s/api/v1/bootstrap_config_test.go @@ -0,0 +1,59 @@ +package v1_test + +import ( + "testing" + + apiv1 "github.com/canonical/k8s/api/v1" + "github.com/canonical/k8s/pkg/utils/vals" + . 
"github.com/onsi/gomega" +) + +func TestBootstrapConfigToMicrocluster(t *testing.T) { + g := NewWithT(t) + + cfg := apiv1.BootstrapConfig{ + ClusterConfig: apiv1.UserFacingClusterConfig{ + Network: apiv1.NetworkConfig{ + Enabled: vals.Pointer(true), + }, + DNS: apiv1.DNSConfig{ + Enabled: vals.Pointer(true), + ClusterDomain: vals.Pointer("cluster.local"), + }, + Ingress: apiv1.IngressConfig{ + Enabled: vals.Pointer(true), + }, + LoadBalancer: apiv1.LoadBalancerConfig{ + Enabled: vals.Pointer(true), + L2Mode: vals.Pointer(true), + CIDRs: vals.Pointer([]string{"10.0.0.0/24"}), + }, + LocalStorage: apiv1.LocalStorageConfig{ + Enabled: vals.Pointer(true), + LocalPath: vals.Pointer("/storage/path"), + SetDefault: vals.Pointer(false), + }, + Gateway: apiv1.GatewayConfig{ + Enabled: vals.Pointer(true), + }, + MetricsServer: apiv1.MetricsServerConfig{ + Enabled: vals.Pointer(true), + }, + }, + PodCIDR: vals.Pointer("10.100.0.0/16"), + ServiceCIDR: vals.Pointer("10.200.0.0/16"), + DisableRBAC: vals.Pointer(false), + SecurePort: vals.Pointer(6443), + CloudProvider: vals.Pointer("external"), + K8sDqlitePort: vals.Pointer(9090), + DatastoreType: vals.Pointer("k8s-dqlite"), + ExtraSANs: []string{"custom.kubernetes"}, + } + + microclusterConfig, err := cfg.ToMicrocluster() + g.Expect(err).To(BeNil()) + + fromMicrocluster, err := apiv1.BootstrapConfigFromMicrocluster(microclusterConfig) + g.Expect(err).To(BeNil()) + g.Expect(fromMicrocluster).To(Equal(cfg)) +} diff --git a/src/k8s/api/v1/cluster_config.go b/src/k8s/api/v1/cluster_config.go index 43b95f030..e86c219a8 100644 --- a/src/k8s/api/v1/cluster_config.go +++ b/src/k8s/api/v1/cluster_config.go @@ -20,59 +20,89 @@ type UpdateClusterConfigResponse struct { } type UserFacingClusterConfig struct { - Network *NetworkConfig `json:"network,omitempty" yaml:"network,omitempty"` - DNS *DNSConfig `json:"dns,omitempty" yaml:"dns,omitempty"` - Ingress *IngressConfig `json:"ingress,omitempty" yaml:"ingress,omitempty"` - LoadBalancer *LoadBalancerConfig `json:"load-balancer,omitempty" yaml:"load-balancer,omitempty"` - LocalStorage *LocalStorageConfig `json:"local-storage,omitempty" yaml:"local-storage,omitempty"` - Gateway *GatewayConfig `json:"gateway,omitempty" yaml:"gateway,omitempty"` - MetricsServer *MetricsServerConfig `json:"metrics-server,omitempty" yaml:"metrics-server,omitempty"` + Network NetworkConfig `json:"network,omitempty" yaml:"network,omitempty"` + DNS DNSConfig `json:"dns,omitempty" yaml:"dns,omitempty"` + Ingress IngressConfig `json:"ingress,omitempty" yaml:"ingress,omitempty"` + LoadBalancer LoadBalancerConfig `json:"load-balancer,omitempty" yaml:"load-balancer,omitempty"` + LocalStorage LocalStorageConfig `json:"local-storage,omitempty" yaml:"local-storage,omitempty"` + Gateway GatewayConfig `json:"gateway,omitempty" yaml:"gateway,omitempty"` + MetricsServer MetricsServerConfig `json:"metrics-server,omitempty" yaml:"metrics-server,omitempty"` } type DNSConfig struct { - Enabled *bool `json:"enabled,omitempty" yaml:"enabled"` - ClusterDomain string `json:"cluster-domain,omitempty" yaml:"cluster-domain"` - ServiceIP string `json:"service-ip,omitempty" yaml:"service-ip"` - UpstreamNameservers []string `json:"upstream-nameservers,omitempty" yaml:"upstream-nameservers"` + Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` + ClusterDomain *string `json:"cluster-domain,omitempty" yaml:"cluster-domain,omitempty"` + ServiceIP *string `json:"service-ip,omitempty" yaml:"service-ip,omitempty"` + UpstreamNameservers *[]string 
`json:"upstream-nameservers,omitempty" yaml:"upstream-nameservers,omitempty"` } +func (c DNSConfig) GetEnabled() bool { return getField(c.Enabled) } +func (c DNSConfig) GetClusterDomain() string { return getField(c.ClusterDomain) } +func (c DNSConfig) GetServiceIP() string { return getField(c.ServiceIP) } +func (c DNSConfig) GetUpstreamNameservers() []string { return getField(c.UpstreamNameservers) } + type IngressConfig struct { - Enabled *bool `json:"enabled,omitempty" yaml:"enabled"` - DefaultTLSSecret string `json:"default-tls-secret,omitempty" yaml:"default-tls-secret"` - EnableProxyProtocol *bool `json:"enable-proxy-protocol,omitempty" yaml:"enable-proxy-protocol"` + Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` + DefaultTLSSecret *string `json:"default-tls-secret,omitempty" yaml:"default-tls-secret,omitempty"` + EnableProxyProtocol *bool `json:"enable-proxy-protocol,omitempty" yaml:"enable-proxy-protocol,omitempty"` } +func (c IngressConfig) GetEnabled() bool { return getField(c.Enabled) } +func (c IngressConfig) GetDefaultTLSSecret() string { return getField(c.DefaultTLSSecret) } +func (c IngressConfig) GetEnableProxyProtocol() bool { return getField(c.EnableProxyProtocol) } + type LoadBalancerConfig struct { - Enabled *bool `json:"enabled,omitempty" yaml:"enabled"` - CIDRs []string `json:"cidrs,omitempty" yaml:"cidrs"` - L2Enabled *bool `json:"l2-mode,omitempty" yaml:"l2-mode"` - L2Interfaces []string `json:"l2-interfaces,omitempty" yaml:"l2-interfaces"` - BGPEnabled *bool `json:"bgp-mode,omitempty" yaml:"bgp-mode"` - BGPLocalASN int `json:"bgp-local-asn,omitempty" yaml:"bgp-local-asn"` - BGPPeerAddress string `json:"bgp-peer-address,omitempty" yaml:"bgp-peer-address"` - BGPPeerASN int `json:"bgp-peer-asn,omitempty" yaml:"bgp-peer-asn"` - BGPPeerPort int `json:"bgp-peer-port,omitempty" yaml:"bgp-peer-port"` -} + Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` + CIDRs *[]string `json:"cidrs,omitempty" yaml:"cidrs,omitempty"` + L2Mode *bool `json:"l2-mode,omitempty" yaml:"l2-mode,omitempty"` + L2Interfaces *[]string `json:"l2-interfaces,omitempty" yaml:"l2-interfaces,omitempty"` + BGPMode *bool `json:"bgp-mode,omitempty" yaml:"bgp-mode,omitempty"` + BGPLocalASN *int `json:"bgp-local-asn,omitempty" yaml:"bgp-local-asn,omitempty"` + BGPPeerAddress *string `json:"bgp-peer-address,omitempty" yaml:"bgp-peer-address,omitempty"` + BGPPeerASN *int `json:"bgp-peer-asn,omitempty" yaml:"bgp-peer-asn,omitempty"` + BGPPeerPort *int `json:"bgp-peer-port,omitempty" yaml:"bgp-peer-port,omitempty"` +} + +func (c LoadBalancerConfig) GetEnabled() bool { return getField(c.Enabled) } +func (c LoadBalancerConfig) GetCIDRs() []string { return getField(c.CIDRs) } +func (c LoadBalancerConfig) GetL2Mode() bool { return getField(c.L2Mode) } +func (c LoadBalancerConfig) GetL2Interfaces() []string { return getField(c.L2Interfaces) } +func (c LoadBalancerConfig) GetBGPMode() bool { return getField(c.BGPMode) } +func (c LoadBalancerConfig) GetBGPLocalASN() int { return getField(c.BGPLocalASN) } +func (c LoadBalancerConfig) GetBGPPeerAddress() string { return getField(c.BGPPeerAddress) } +func (c LoadBalancerConfig) GetBGPPeerASN() int { return getField(c.BGPPeerASN) } +func (c LoadBalancerConfig) GetBGPPeerPort() int { return getField(c.BGPPeerPort) } type LocalStorageConfig struct { - Enabled *bool `json:"enabled,omitempty" yaml:"enabled"` - LocalPath string `json:"local-path,omitempty" yaml:"local-path"` - ReclaimPolicy string `json:"reclaim-policy,omitempty" 
yaml:"reclaim-policy"` - SetDefault *bool `json:"set-default,omitempty" yaml:"set-default"` + Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` + LocalPath *string `json:"local-path,omitempty" yaml:"local-path,omitempty"` + ReclaimPolicy *string `json:"reclaim-policy,omitempty" yaml:"reclaim-policy,omitempty"` + SetDefault *bool `json:"set-default,omitempty" yaml:"set-default,omitempty"` } +func (c LocalStorageConfig) GetEnabled() bool { return getField(c.Enabled) } +func (c LocalStorageConfig) GetLocalPath() string { return getField(c.LocalPath) } +func (c LocalStorageConfig) GetReclaimPolicy() string { return getField(c.ReclaimPolicy) } +func (c LocalStorageConfig) GetSetDefault() bool { return getField(c.SetDefault) } + type NetworkConfig struct { - Enabled *bool `json:"enabled,omitempty" yaml:"enabled"` + Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` } +func (c NetworkConfig) GetEnabled() bool { return getField(c.Enabled) } + type GatewayConfig struct { - Enabled *bool `json:"enabled,omitempty" yaml:"enabled"` + Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` } +func (c GatewayConfig) GetEnabled() bool { return getField(c.Enabled) } + type MetricsServerConfig struct { - Enabled *bool `json:"enabled,omitempty" yaml:"enabled"` + Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` } +func (c MetricsServerConfig) GetEnabled() bool { return getField(c.Enabled) } + func (c UserFacingClusterConfig) String() string { b, err := yaml.Marshal(c) if err != nil { diff --git a/src/k8s/api/v1/types.go b/src/k8s/api/v1/types.go index b626453e5..9eef29b9b 100644 --- a/src/k8s/api/v1/types.go +++ b/src/k8s/api/v1/types.go @@ -4,60 +4,9 @@ import ( "fmt" "strings" - "github.com/canonical/k8s/pkg/utils/vals" "gopkg.in/yaml.v2" ) -type BootstrapConfig struct { - // Components are the components that should be enabled on bootstrap. - Components []string `yaml:"components"` - // ClusterCIDR is the CIDR of the cluster. - ClusterCIDR string `yaml:"cluster-cidr"` - // ServiceCIDR is the CIDR of the cluster services. - ServiceCIDR string `yaml:"service-cidr"` - // EnableRBAC determines if RBAC will be enabled; *bool to know true/false/unset. - EnableRBAC *bool `yaml:"enable-rbac"` - K8sDqlitePort int `yaml:"k8s-dqlite-port"` - Datastore string `yaml:"datastore"` - DatastoreURL string `yaml:"datastore-url,omitempty"` - DatastoreCACert string `yaml:"datastore-ca-crt,omitempty"` - DatastoreClientCert string `yaml:"datastore-client-crt,omitempty"` - DatastoreClientKey string `yaml:"datastore-client-key,omitempty"` - ExtraSANs []string `yaml:"extrasans,omitempty"` -} - -// SetDefaults sets the fields to default values. -func (b *BootstrapConfig) SetDefaults() { - b.Components = []string{"dns", "metrics-server", "network", "gateway"} - b.ClusterCIDR = "10.1.0.0/16" - b.ServiceCIDR = "10.152.183.0/24" - b.EnableRBAC = vals.Pointer(true) - b.K8sDqlitePort = 9000 - b.Datastore = "k8s-dqlite" -} - -// ToMap marshals the BootstrapConfig into yaml and map it to "bootstrapConfig". -func (b *BootstrapConfig) ToMap() (map[string]string, error) { - config, err := yaml.Marshal(b) - if err != nil { - return nil, fmt.Errorf("failed to marshal config map: %w", err) - } - - return map[string]string{ - "bootstrapConfig": string(config), - }, nil -} - -// BootstrapConfigFromMap converts a string map to a BootstrapConfig struct. 
-func BootstrapConfigFromMap(m map[string]string) (*BootstrapConfig, error) { - config := &BootstrapConfig{} - err := yaml.Unmarshal([]byte(m["bootstrapConfig"]), config) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal bootstrap config: %w", err) - } - return config, nil -} - type ClusterRole string const ( @@ -93,16 +42,22 @@ type NodeStatus struct { DatastoreRole DatastoreRole `json:"datastore-role,omitempty"` } +type Datastore struct { + Type string `json:"type,omitempty"` + ExternalURL string `json:"external-url,omitempty" yaml:"external-url,omitempty"` +} + // ClusterStatus holds information about the cluster, e.g. its current members type ClusterStatus struct { // Ready is true if at least one node in the cluster is in READY state. - Ready bool `json:"ready,omitempty"` - Members []NodeStatus `json:"members,omitempty"` - Config UserFacingClusterConfig `json:"config,omitempty"` + Ready bool `json:"ready,omitempty"` + Members []NodeStatus `json:"members,omitempty"` + Config UserFacingClusterConfig `json:"config,omitempty"` + Datastore Datastore `json:"datastore,omitempty"` } -// HaClusterFormed returns true if the cluster is in high-availability mode (more than two voter nodes). -func (c ClusterStatus) HaClusterFormed() bool { +// haClusterFormed returns true if the cluster is in high-availability mode (more than two voter nodes). +func (c ClusterStatus) haClusterFormed() bool { voters := 0 for _, member := range c.Members { if member.DatastoreRole == DatastoreRoleVoter { @@ -112,26 +67,22 @@ func (c ClusterStatus) HaClusterFormed() bool { return voters > 2 } -// TODO: Print k8s version. However, multiple nodes can run different version, so we would need to query all nodes. -func (c ClusterStatus) String() string { +func (c ClusterStatus) datastoreToString() string { result := strings.Builder{} - if c.Ready { - result.WriteString("status: ready") - } else { - result.WriteString("status: not ready") + // Datastore + if c.Datastore.Type != "" { + result.WriteString(fmt.Sprintf(" type: %s\n", c.Datastore.Type)) + // Datastore URL for external only + if c.Datastore.Type == "external" { + if c.Datastore.ExternalURL != "" { + result.WriteString(fmt.Sprintf(" url: %s\n", c.Datastore.ExternalURL)) + } + return result.String() + } } - result.WriteString("\n") - - result.WriteString("high-availability: ") - if c.HaClusterFormed() { - result.WriteString("yes") - } else { - result.WriteString("no") - } - result.WriteString("\n") - result.WriteString("datastore:\n") + // Datastore roles for dqlite voters := make([]NodeStatus, 0, len(c.Members)) standBys := make([]NodeStatus, 0, len(c.Members)) spares := make([]NodeStatus, 0, len(c.Members)) @@ -170,35 +121,39 @@ func (c ClusterStatus) String() string { result.WriteString(" spare-nodes: none\n") } - printedConfig := UserFacingClusterConfig{} - if c.Config.Network != nil && c.Config.Network.Enabled != nil && *c.Config.Network.Enabled { - printedConfig.Network = c.Config.Network - } - if c.Config.DNS != nil && c.Config.DNS.Enabled != nil && *c.Config.DNS.Enabled { - printedConfig.DNS = c.Config.DNS - } - if c.Config.Ingress != nil && c.Config.Ingress.Enabled != nil && *c.Config.Ingress.Enabled { - printedConfig.Ingress = c.Config.Ingress - } - if c.Config.LoadBalancer != nil && c.Config.LoadBalancer.Enabled != nil && *c.Config.LoadBalancer.Enabled { - printedConfig.LoadBalancer = c.Config.LoadBalancer - } - if c.Config.LocalStorage != nil && c.Config.LocalStorage.Enabled != nil && *c.Config.LocalStorage.Enabled { - 
printedConfig.LocalStorage = c.Config.LocalStorage - } - if c.Config.Gateway != nil && c.Config.Gateway.Enabled != nil && *c.Config.Gateway.Enabled { - printedConfig.Gateway = c.Config.Gateway + return result.String() +} + +// TODO: Print k8s version. However, multiple nodes can run different version, so we would need to query all nodes. +func (c ClusterStatus) String() string { + result := strings.Builder{} + + // Status + if c.Ready { + result.WriteString("status: ready") + } else { + result.WriteString("status: not ready") } - if c.Config.MetricsServer != nil && c.Config.MetricsServer.Enabled != nil && *c.Config.MetricsServer.Enabled { - printedConfig.MetricsServer = c.Config.MetricsServer + result.WriteString("\n") + + // High availability + result.WriteString("high-availability: ") + if c.haClusterFormed() { + result.WriteString("yes") + } else { + result.WriteString("no") } - b, _ := yaml.Marshal(printedConfig) - // If no config is set the marshalling will return {} - if s := string(b); s != "{}\n" { - result.WriteString("\n") + // Datastore + result.WriteString("\n") + result.WriteString("datastore:\n") + result.WriteString(c.datastoreToString()) + + // Config + var emptyConfig UserFacingClusterConfig + if c.Config != emptyConfig { + b, _ := yaml.Marshal(c.Config) result.WriteString(string(b)) } - return result.String() } diff --git a/src/k8s/api/v1/types_test.go b/src/k8s/api/v1/types_test.go index 6a1c7e48e..adb262049 100644 --- a/src/k8s/api/v1/types_test.go +++ b/src/k8s/api/v1/types_test.go @@ -7,51 +7,8 @@ import ( . "github.com/onsi/gomega" ) -// This is expected to break if the default changes to make sure this is done intentionally. -func TestSetDefaults(t *testing.T) { - g := NewWithT(t) - - b := &BootstrapConfig{} - b.SetDefaults() - - expected := &BootstrapConfig{ - Components: []string{"dns", "metrics-server", "network", "gateway"}, - ClusterCIDR: "10.1.0.0/16", - ServiceCIDR: "10.152.183.0/24", - EnableRBAC: vals.Pointer(true), - K8sDqlitePort: 9000, - Datastore: "k8s-dqlite", - } - - g.Expect(b).To(Equal(expected)) -} - -func TestBootstrapConfigFromMap(t *testing.T) { - g := NewWithT(t) - // Create a new BootstrapConfig with default values - bc := &BootstrapConfig{ - ClusterCIDR: "10.1.0.0/16", - Components: []string{"dns", "network", "storage"}, - EnableRBAC: vals.Pointer(true), - K8sDqlitePort: 9000, - } - - // Convert the BootstrapConfig to a map - m, err := bc.ToMap() - g.Expect(err).To(BeNil()) - - // Unmarshal the YAML string from the map into a new BootstrapConfig instance - bcyaml, err := BootstrapConfigFromMap(m) - - // Check for errors - g.Expect(err).To(BeNil()) - // Compare the unmarshaled BootstrapConfig with the original one - g.Expect(bcyaml).To(Equal(bc)) // Note the *bc here to compare values, not pointers - -} - func TestHaClusterFormed(t *testing.T) { - g := NewGomegaWithT(t) + g := NewWithT(t) testCases := []struct { name string @@ -91,14 +48,12 @@ func TestHaClusterFormed(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - g.Expect(ClusterStatus{Members: tc.members}.HaClusterFormed()).To(Equal(tc.expectedResult)) + g.Expect(ClusterStatus{Members: tc.members}.haClusterFormed()).To(Equal(tc.expectedResult)) }) } } func TestString(t *testing.T) { - g := NewGomegaWithT(t) - testCases := []struct { name string clusterStatus ClusterStatus @@ -114,35 +69,58 @@ func TestString(t *testing.T) { {Name: "node3", DatastoreRole: DatastoreRoleVoter, Address: "192.168.0.3"}, }, Config: UserFacingClusterConfig{ - Network: 
&NetworkConfig{Enabled: vals.Pointer(true)}, - DNS: &DNSConfig{Enabled: vals.Pointer(true)}, + Network: NetworkConfig{Enabled: vals.Pointer(true)}, + DNS: DNSConfig{Enabled: vals.Pointer(true)}, }, + Datastore: Datastore{Type: "k8s-dqlite", ExternalURL: ""}, }, expectedOutput: `status: ready high-availability: yes datastore: + type: k8s-dqlite voter-nodes: - 192.168.0.1 - 192.168.0.2 - 192.168.0.3 standby-nodes: none spare-nodes: none - network: enabled: true dns: enabled: true - cluster-domain: "" - service-ip: "" - upstream-nameservers: [] +`, + }, + { + name: "External Datastore", + clusterStatus: ClusterStatus{ + Ready: true, + Members: []NodeStatus{ + {Name: "node1", DatastoreRole: DatastoreRoleVoter, Address: "192.168.0.1"}, + }, + Config: UserFacingClusterConfig{ + Network: NetworkConfig{Enabled: vals.Pointer(true)}, + DNS: DNSConfig{Enabled: vals.Pointer(true)}, + }, + Datastore: Datastore{Type: "external", ExternalURL: "etcd-url"}, + }, + expectedOutput: `status: ready +high-availability: no +datastore: + type: external + url: etcd-url +network: + enabled: true +dns: + enabled: true `, }, { name: "Cluster not ready, HA not formed, no nodes", clusterStatus: ClusterStatus{ - Ready: false, - Members: []NodeStatus{}, - Config: UserFacingClusterConfig{}, + Ready: false, + Members: []NodeStatus{}, + Config: UserFacingClusterConfig{}, + Datastore: Datastore{}, }, expectedOutput: `status: not ready high-availability: no @@ -156,6 +134,7 @@ datastore: for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) g.Expect(tc.clusterStatus.String()).To(Equal(tc.expectedOutput)) }) } diff --git a/src/k8s/api/v1/util.go b/src/k8s/api/v1/util.go new file mode 100644 index 000000000..005e8c98a --- /dev/null +++ b/src/k8s/api/v1/util.go @@ -0,0 +1,9 @@ +package v1 + +func getField[T any](val *T) T { + if val != nil { + return *val + } + var zero T + return zero +} diff --git a/src/k8s/cmd/k8s/k8s.go b/src/k8s/cmd/k8s/k8s.go index f1743726c..bb9e77ec0 100644 --- a/src/k8s/cmd/k8s/k8s.go +++ b/src/k8s/cmd/k8s/k8s.go @@ -61,8 +61,8 @@ func NewRootCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { cmd.PersistentFlags().StringVar(&opts.stateDir, "state-dir", "", "directory with the dqlite datastore") cmd.PersistentFlags().BoolVarP(&opts.logDebug, "debug", "d", false, "show all debug messages") cmd.PersistentFlags().BoolVarP(&opts.logVerbose, "verbose", "v", true, "show all information messages") - cmd.PersistentFlags().StringVarP(&opts.outputFormat, "output-format", "o", "plain", "set the output format to one of plain, json or yaml") - cmd.PersistentFlags().DurationVarP(&opts.timeout, "timeout", "t", 90*time.Second, "the max time to wait for the command to execute") + cmd.PersistentFlags().StringVar(&opts.outputFormat, "output-format", "plain", "set the output format to one of plain, json or yaml") + cmd.PersistentFlags().DurationVar(&opts.timeout, "timeout", 90*time.Second, "the max time to wait for the command to execute") // By default, the state dir is set to a fixed directory in the snap. // This shouldn't be overwritten by the user. 
diff --git a/src/k8s/cmd/k8s/k8s_bootstrap.go b/src/k8s/cmd/k8s/k8s_bootstrap.go index 03ac7a56d..2957427b3 100644 --- a/src/k8s/cmd/k8s/k8s_bootstrap.go +++ b/src/k8s/cmd/k8s/k8s_bootstrap.go @@ -8,11 +8,13 @@ import ( "os" "slices" "strings" + "unicode" apiv1 "github.com/canonical/k8s/api/v1" cmdutil "github.com/canonical/k8s/cmd/util" "github.com/canonical/k8s/pkg/config" "github.com/canonical/k8s/pkg/utils" + "github.com/canonical/k8s/pkg/utils/vals" "github.com/canonical/lxd/lxd/util" "github.com/spf13/cobra" "gopkg.in/yaml.v2" @@ -44,7 +46,7 @@ func newBootstrapCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { PreRun: chainPreRunHooks(hookRequireRoot(env)), Run: func(cmd *cobra.Command, args []string) { if opts.interactive && opts.configFile != "" { - cmd.PrintErrln("Error: --interactive and --config flags cannot be set at the same time.") + cmd.PrintErrln("Error: --interactive and --file flags cannot be set at the same time.") env.Exit(1) return } @@ -64,8 +66,9 @@ func newBootstrapCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { } if opts.address == "" { - opts.address = util.CanonicalNetworkAddress(util.NetworkInterfaceAddress(), config.DefaultPort) + opts.address = util.NetworkInterfaceAddress() } + opts.address = util.CanonicalNetworkAddress(opts.address, config.DefaultPort) client, err := env.Client(cmd.Context()) if err != nil { @@ -80,7 +83,7 @@ func newBootstrapCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { return } - bootstrapConfig := apiv1.BootstrapConfig{} + var bootstrapConfig apiv1.BootstrapConfig switch { case opts.interactive: bootstrapConfig = getConfigInteractively(env.Stdin, env.Stdout, env.Stderr) @@ -92,7 +95,23 @@ func newBootstrapCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { return } default: - bootstrapConfig.SetDefaults() + // Default bootstrap configuration + bootstrapConfig = apiv1.BootstrapConfig{ + ClusterConfig: apiv1.UserFacingClusterConfig{ + Network: apiv1.NetworkConfig{ + Enabled: vals.Pointer(true), + }, + DNS: apiv1.DNSConfig{ + Enabled: vals.Pointer(true), + }, + Gateway: apiv1.GatewayConfig{ + Enabled: vals.Pointer(true), + }, + MetricsServer: apiv1.MetricsServerConfig{ + Enabled: vals.Pointer(true), + }, + }, + } } cmd.PrintErrln("Bootstrapping the cluster. 
This may take a few seconds, please wait.") @@ -117,24 +136,22 @@ func newBootstrapCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { } cmd.PersistentFlags().BoolVar(&opts.interactive, "interactive", false, "interactively configure the most important cluster options") - cmd.PersistentFlags().StringVar(&opts.configFile, "config", "", "path to the YAML file containing your custom cluster bootstrap configuration.") + cmd.PersistentFlags().StringVar(&opts.configFile, "file", "", "path to the YAML file containing your custom cluster bootstrap configuration.") cmd.Flags().StringVar(&opts.name, "name", "", "node name, defaults to hostname") cmd.Flags().StringVar(&opts.address, "address", "", "microcluster address, defaults to the node IP address") + return cmd } func getConfigFromYaml(filePath string) (apiv1.BootstrapConfig, error) { - config := apiv1.BootstrapConfig{} - config.SetDefaults() - - yamlContent, err := os.ReadFile(filePath) + b, err := os.ReadFile(filePath) if err != nil { - return config, fmt.Errorf("failed to read YAML config file: %w", err) + return apiv1.BootstrapConfig{}, fmt.Errorf("failed to read file: %w", err) } - err = yaml.Unmarshal(yamlContent, &config) - if err != nil { - return config, fmt.Errorf("failed to parse YAML config file: %w", err) + var config apiv1.BootstrapConfig + if err := yaml.UnmarshalStrict(b, &config); err != nil { + return apiv1.BootstrapConfig{}, fmt.Errorf("failed to parse YAML config file: %w", err) } return config, nil @@ -142,21 +159,41 @@ func getConfigFromYaml(filePath string) (apiv1.BootstrapConfig, error) { func getConfigInteractively(stdin io.Reader, stdout io.Writer, stderr io.Writer) apiv1.BootstrapConfig { config := apiv1.BootstrapConfig{} - config.SetDefaults() components := askQuestion( stdin, stdout, stderr, "Which components would you like to enable?", componentList, - strings.Join(config.Components, ", "), + "network, dns, gateway, metrics-server", nil, ) - config.Components = strings.Split(components, ",") + for _, component := range strings.FieldsFunc(components, func(r rune) bool { return unicode.IsSpace(r) || r == ',' }) { + switch component { + case "network": + config.ClusterConfig.Network.Enabled = vals.Pointer(true) + case "dns": + config.ClusterConfig.DNS.Enabled = vals.Pointer(true) + case "ingress": + config.ClusterConfig.Ingress.Enabled = vals.Pointer(true) + case "load-balancer": + config.ClusterConfig.LoadBalancer.Enabled = vals.Pointer(true) + case "gateway": + config.ClusterConfig.Gateway.Enabled = vals.Pointer(true) + case "local-storage": + config.ClusterConfig.LocalStorage.Enabled = vals.Pointer(true) + case "metrics-server": + config.ClusterConfig.MetricsServer.Enabled = vals.Pointer(true) + } + } + + podCIDR := askQuestion(stdin, stdout, stderr, "Please set the Pod CIDR:", nil, "10.1.0.0/16", nil) + serviceCIDR := askQuestion(stdin, stdout, stderr, "Please set the Service CIDR:", nil, "10.152.183.0/24", nil) + + config.PodCIDR = vals.Pointer(podCIDR) + config.ServiceCIDR = vals.Pointer(serviceCIDR) + + // TODO: any other configs we care about in the interactive bootstrap? 
- config.ClusterCIDR = askQuestion(stdin, stdout, stderr, "Please set the Cluster CIDR:", nil, config.ClusterCIDR, nil) - config.ServiceCIDR = askQuestion(stdin, stdout, stderr, "Please set the Service CIDR:", nil, config.ServiceCIDR, nil) - rbac := askBool(stdin, stdout, stderr, "Enable Role Based Access Control (RBAC)?", []string{"yes", "no"}, "yes") - *config.EnableRBAC = rbac return config } @@ -212,18 +249,3 @@ func askQuestion(stdin io.Reader, stdout io.Writer, stderr io.Writer, question s return s } } - -// askBool asks a question and expect a yes/no answer. -func askBool(stdin io.Reader, stdout io.Writer, stderr io.Writer, question string, options []string, defaultVal string) bool { - for { - answer := askQuestion(stdin, stdout, stderr, question, options, defaultVal, nil) - - if utils.ValueInSlice(strings.ToLower(answer), []string{"yes", "y"}) { - return true - } else if utils.ValueInSlice(strings.ToLower(answer), []string{"no", "n"}) { - return false - } - - fmt.Fprintf(stderr, "Invalid input, try again.\n\n") - } -} diff --git a/src/k8s/cmd/k8s/k8s_bootstrap_test.go b/src/k8s/cmd/k8s/k8s_bootstrap_test.go index 3257c41a1..806c26d90 100644 --- a/src/k8s/cmd/k8s/k8s_bootstrap_test.go +++ b/src/k8s/cmd/k8s/k8s_bootstrap_test.go @@ -1,6 +1,7 @@ package k8s import ( + _ "embed" "os" "path/filepath" "testing" @@ -10,6 +11,15 @@ import ( . "github.com/onsi/gomega" ) +var ( + //go:embed testdata/bootstrap-config-full.yaml + bootstrapConfigFull string + //go:embed testdata/bootstrap-config-some.yaml + bootstrapConfigSome string + //go:embed testdata/bootstrap-config-invalid-keys.yaml + bootstrapConfigInvalidKeys string +) + type testCase struct { name string yamlConfig string @@ -19,45 +29,62 @@ type testCase struct { var testCases = []testCase{ { - name: "CompleteConfig", - yamlConfig: ` -components: - - network - - dns - - gateway - - ingress - - storage - - metrics-server -cluster-cidr: "10.244.0.0/16" -service-cidr: "10.152.100.0/24" -enable-rbac: true -k8s-dqlite-port: 12379`, + name: "FullConfig", + yamlConfig: bootstrapConfigFull, expectedConfig: apiv1.BootstrapConfig{ - Components: []string{"network", "dns", "gateway", "ingress", "storage", "metrics-server"}, - ClusterCIDR: "10.244.0.0/16", - ServiceCIDR: "10.152.100.0/24", - EnableRBAC: vals.Pointer(true), - K8sDqlitePort: 12379, - Datastore: "k8s-dqlite", + ClusterConfig: apiv1.UserFacingClusterConfig{ + Network: apiv1.NetworkConfig{ + Enabled: vals.Pointer(true), + }, + DNS: apiv1.DNSConfig{ + Enabled: vals.Pointer(true), + ClusterDomain: vals.Pointer("cluster.local"), + }, + Ingress: apiv1.IngressConfig{ + Enabled: vals.Pointer(true), + }, + LoadBalancer: apiv1.LoadBalancerConfig{ + Enabled: vals.Pointer(true), + L2Mode: vals.Pointer(true), + CIDRs: vals.Pointer([]string{"10.0.0.0/24"}), + }, + LocalStorage: apiv1.LocalStorageConfig{ + Enabled: vals.Pointer(true), + LocalPath: vals.Pointer("/storage/path"), + SetDefault: vals.Pointer(false), + }, + Gateway: apiv1.GatewayConfig{ + Enabled: vals.Pointer(true), + }, + MetricsServer: apiv1.MetricsServerConfig{ + Enabled: vals.Pointer(true), + }, + }, + PodCIDR: vals.Pointer("10.100.0.0/16"), + ServiceCIDR: vals.Pointer("10.200.0.0/16"), + DisableRBAC: vals.Pointer(false), + SecurePort: vals.Pointer(6443), + CloudProvider: vals.Pointer("external"), + K8sDqlitePort: vals.Pointer(9090), + DatastoreType: vals.Pointer("k8s-dqlite"), + ExtraSANs: []string{"custom.kubernetes"}, }, }, { - name: "IncompleteConfig", - yamlConfig: ` -cluster-cidr: "10.244.0.0/16" -enable-rbac: true -bananas: 
5`, + name: "SomeConfig", + yamlConfig: bootstrapConfigSome, expectedConfig: apiv1.BootstrapConfig{ - Components: []string{"dns", "metrics-server", "network", "gateway"}, - ClusterCIDR: "10.244.0.0/16", - ServiceCIDR: "10.152.183.0/24", - EnableRBAC: vals.Pointer(true), - K8sDqlitePort: 9000, - Datastore: "k8s-dqlite", + PodCIDR: vals.Pointer("10.100.0.0/16"), + ServiceCIDR: vals.Pointer("10.152.200.0/24"), }, }, { - name: "InvalidYaml", + name: "InvalidKeys", + yamlConfig: bootstrapConfigInvalidKeys, + expectedError: "field cluster-cidr not found in type v1.BootstrapConfig", + }, + { + name: "InvalidYAML", yamlConfig: "this is not valid yaml", expectedError: "failed to parse YAML config file", }, diff --git a/src/k8s/cmd/k8s/k8s_disable.go b/src/k8s/cmd/k8s/k8s_disable.go index 961f68229..660db0e80 100644 --- a/src/k8s/cmd/k8s/k8s_disable.go +++ b/src/k8s/cmd/k8s/k8s_disable.go @@ -38,31 +38,31 @@ func newDisableCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { switch functionality { case "network": - config.Network = &api.NetworkConfig{ + config.Network = api.NetworkConfig{ Enabled: vals.Pointer(false), } case "dns": - config.DNS = &api.DNSConfig{ + config.DNS = api.DNSConfig{ Enabled: vals.Pointer(false), } case "gateway": - config.Gateway = &api.GatewayConfig{ + config.Gateway = api.GatewayConfig{ Enabled: vals.Pointer(false), } case "ingress": - config.Ingress = &api.IngressConfig{ + config.Ingress = api.IngressConfig{ Enabled: vals.Pointer(false), } case "local-storage": - config.LocalStorage = &api.LocalStorageConfig{ + config.LocalStorage = api.LocalStorageConfig{ Enabled: vals.Pointer(false), } case "load-balancer": - config.LoadBalancer = &api.LoadBalancerConfig{ + config.LoadBalancer = api.LoadBalancerConfig{ Enabled: vals.Pointer(false), } case "metrics-server": - config.MetricsServer = &api.MetricsServerConfig{ + config.MetricsServer = api.MetricsServerConfig{ Enabled: vals.Pointer(false), } } diff --git a/src/k8s/cmd/k8s/k8s_disable_test.go b/src/k8s/cmd/k8s/k8s_disable_test.go index 5dcc40147..aa66a6a1f 100644 --- a/src/k8s/cmd/k8s/k8s_disable_test.go +++ b/src/k8s/cmd/k8s/k8s_disable_test.go @@ -34,7 +34,7 @@ func TestDisableCmd(t *testing.T) { funcs: []string{"gateway"}, expectedCall: apiv1.UpdateClusterConfigRequest{ Config: apiv1.UserFacingClusterConfig{ - Gateway: &apiv1.GatewayConfig{Enabled: vals.Pointer(false)}, + Gateway: apiv1.GatewayConfig{Enabled: vals.Pointer(false)}, }, }, expectedStdout: "disabled", @@ -44,8 +44,8 @@ func TestDisableCmd(t *testing.T) { funcs: []string{"load-balancer", "gateway"}, expectedCall: apiv1.UpdateClusterConfigRequest{ Config: apiv1.UserFacingClusterConfig{ - Gateway: &apiv1.GatewayConfig{Enabled: vals.Pointer(false)}, - LoadBalancer: &apiv1.LoadBalancerConfig{Enabled: vals.Pointer(false)}, + Gateway: apiv1.GatewayConfig{Enabled: vals.Pointer(false)}, + LoadBalancer: apiv1.LoadBalancerConfig{Enabled: vals.Pointer(false)}, }, }, expectedStdout: "disabled", diff --git a/src/k8s/cmd/k8s/k8s_enable.go b/src/k8s/cmd/k8s/k8s_enable.go index 57304cd7f..239159e4c 100644 --- a/src/k8s/cmd/k8s/k8s_enable.go +++ b/src/k8s/cmd/k8s/k8s_enable.go @@ -38,31 +38,31 @@ func newEnableCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { switch functionality { case "network": - config.Network = &api.NetworkConfig{ + config.Network = api.NetworkConfig{ Enabled: vals.Pointer(true), } case "dns": - config.DNS = &api.DNSConfig{ + config.DNS = api.DNSConfig{ Enabled: vals.Pointer(true), } case "gateway": - config.Gateway = &api.GatewayConfig{ + 
config.Gateway = api.GatewayConfig{ Enabled: vals.Pointer(true), } case "ingress": - config.Ingress = &api.IngressConfig{ + config.Ingress = api.IngressConfig{ Enabled: vals.Pointer(true), } case "local-storage": - config.LocalStorage = &api.LocalStorageConfig{ + config.LocalStorage = api.LocalStorageConfig{ Enabled: vals.Pointer(true), } case "load-balancer": - config.LoadBalancer = &api.LoadBalancerConfig{ + config.LoadBalancer = api.LoadBalancerConfig{ Enabled: vals.Pointer(true), } case "metrics-server": - config.MetricsServer = &api.MetricsServerConfig{ + config.MetricsServer = api.MetricsServerConfig{ Enabled: vals.Pointer(true), } } diff --git a/src/k8s/cmd/k8s/k8s_enable_test.go b/src/k8s/cmd/k8s/k8s_enable_test.go index e40d75f83..4c8e19d58 100644 --- a/src/k8s/cmd/k8s/k8s_enable_test.go +++ b/src/k8s/cmd/k8s/k8s_enable_test.go @@ -34,7 +34,7 @@ func TestK8sEnableCmd(t *testing.T) { funcs: []string{"gateway"}, expectedCall: apiv1.UpdateClusterConfigRequest{ Config: apiv1.UserFacingClusterConfig{ - Gateway: &apiv1.GatewayConfig{Enabled: vals.Pointer(true)}, + Gateway: apiv1.GatewayConfig{Enabled: vals.Pointer(true)}, }, }, expectedStdout: "enabled", @@ -44,8 +44,8 @@ func TestK8sEnableCmd(t *testing.T) { funcs: []string{"load-balancer", "gateway"}, expectedCall: apiv1.UpdateClusterConfigRequest{ Config: apiv1.UserFacingClusterConfig{ - Gateway: &apiv1.GatewayConfig{Enabled: vals.Pointer(true)}, - LoadBalancer: &apiv1.LoadBalancerConfig{Enabled: vals.Pointer(true)}, + Gateway: apiv1.GatewayConfig{Enabled: vals.Pointer(true)}, + LoadBalancer: apiv1.LoadBalancerConfig{Enabled: vals.Pointer(true)}, }, }, expectedStdout: "enabled", diff --git a/src/k8s/cmd/k8s/k8s_get.go b/src/k8s/cmd/k8s/k8s_get.go index 86902fcda..f9b463fa5 100644 --- a/src/k8s/cmd/k8s/k8s_get.go +++ b/src/k8s/cmd/k8s/k8s_get.go @@ -41,65 +41,65 @@ func newGetCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { case "": output = config case "network": - output = *config.Network + output = config.Network case "dns": - output = *config.DNS + output = config.DNS case "gateway": - output = *config.Gateway + output = config.Gateway case "ingress": - output = *config.Ingress + output = config.Ingress case "local-storage": - output = *config.LocalStorage + output = config.LocalStorage case "load-balancer": - output = *config.LoadBalancer + output = config.LoadBalancer case "metrics-server": - output = *config.MetricsServer + output = config.MetricsServer case "network.enabled": - output = *config.Network.Enabled + output = config.Network.GetEnabled() case "dns.enabled": - output = *config.DNS.Enabled + output = config.DNS.GetEnabled() case "dns.upstream-nameservers": - output = config.DNS.UpstreamNameservers + output = config.DNS.GetUpstreamNameservers() case "dns.cluster-domain": - output = config.DNS.ClusterDomain + output = config.DNS.GetClusterDomain() case "dns.service-ip": - output = config.DNS.ServiceIP + output = config.DNS.GetServiceIP() case "gateway.enabled": - output = *config.Gateway.Enabled + output = config.Gateway.GetEnabled() case "ingress.enabled": - output = *config.Ingress.Enabled + output = config.Ingress.GetEnabled() case "ingress.default-tls-secret": - output = config.Ingress.DefaultTLSSecret + output = config.Ingress.GetDefaultTLSSecret() case "ingress.enable-proxy-protocol": - output = *config.Ingress.EnableProxyProtocol + output = config.Ingress.GetEnableProxyProtocol() case "local-storage.enabled": - output = *config.LocalStorage.Enabled + output = config.LocalStorage.GetEnabled() case 
"local-storage.local-path": - output = config.LocalStorage.LocalPath + output = config.LocalStorage.GetLocalPath() case "local-storage.reclaim-policy": - output = config.LocalStorage.ReclaimPolicy + output = config.LocalStorage.GetReclaimPolicy() case "local-storage.set-default": - output = *config.LocalStorage.SetDefault + output = config.LocalStorage.GetSetDefault() case "load-balancer.enabled": - output = *config.LoadBalancer.Enabled + output = config.LoadBalancer.GetEnabled() case "load-balancer.cidrs": - output = config.LoadBalancer.CIDRs + output = config.LoadBalancer.GetCIDRs() case "load-balancer.l2-mode": - output = *config.LoadBalancer.L2Enabled + output = config.LoadBalancer.GetL2Mode() case "load-balancer.l2-interfaces": - output = config.LoadBalancer.L2Interfaces + output = config.LoadBalancer.GetL2Interfaces() case "load-balancer.bgp-mode": - output = *config.LoadBalancer.BGPEnabled + output = config.LoadBalancer.GetBGPMode() case "load-balancer.bgp-local-asn": - output = config.LoadBalancer.BGPLocalASN + output = config.LoadBalancer.GetBGPLocalASN() case "load-balancer.bgp-peer-address": - output = config.LoadBalancer.BGPPeerAddress + output = config.LoadBalancer.GetBGPPeerAddress() case "load-balancer.bgp-peer-port": - output = config.LoadBalancer.BGPPeerPort + output = config.LoadBalancer.GetBGPPeerPort() case "load-balancer.bgp-peer-asn": - output = config.LoadBalancer.BGPPeerASN + output = config.LoadBalancer.GetBGPPeerASN() case "metrics-server.enabled": - output = *config.MetricsServer.Enabled + output = config.MetricsServer.GetEnabled() default: cmd.PrintErrf("Error: Unknown config key %q.\n", key) env.Exit(1) diff --git a/src/k8s/cmd/k8s/k8s_join_cluster.go b/src/k8s/cmd/k8s/k8s_join_cluster.go index e5826f6aa..50de7b3ea 100644 --- a/src/k8s/cmd/k8s/k8s_join_cluster.go +++ b/src/k8s/cmd/k8s/k8s_join_cluster.go @@ -45,8 +45,9 @@ func newJoinClusterCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { } if opts.address == "" { - opts.address = util.CanonicalNetworkAddress(util.NetworkInterfaceAddress(), config.DefaultPort) + opts.address = util.NetworkInterfaceAddress() } + opts.address = util.CanonicalNetworkAddress(opts.address, config.DefaultPort) client, err := env.Client(cmd.Context()) if err != nil { diff --git a/src/k8s/cmd/k8s/k8s_set.go b/src/k8s/cmd/k8s/k8s_set.go index 8d47958e8..84906e5de 100644 --- a/src/k8s/cmd/k8s/k8s_set.go +++ b/src/k8s/cmd/k8s/k8s_set.go @@ -44,36 +44,6 @@ func newSetCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { return } - // Fetching current config to check where an already enabled functionality is updated. 
- currentConfig, err := client.GetClusterConfig(cmd.Context(), apiv1.GetClusterConfigRequest{}) - if err != nil { - cmd.PrintErrf("Error: Failed to retrieve the current cluster configuration.\n\nThe error was: %v\n", err) - env.Exit(1) - return - } - - if vals.OptionalBool(currentConfig.Network.Enabled, false) && config.Network != nil && config.Network.Enabled == nil { - cmd.PrintErrln("network configuration will be updated") - } - if vals.OptionalBool(currentConfig.DNS.Enabled, false) && config.DNS != nil && config.DNS.Enabled == nil { - cmd.PrintErrln("dns configuration will be updated") - } - if vals.OptionalBool(currentConfig.Gateway.Enabled, false) && config.Gateway != nil && config.Gateway.Enabled == nil { - cmd.PrintErrln("gateway configuration will be updated") - } - if vals.OptionalBool(currentConfig.Ingress.Enabled, false) && config.Ingress != nil && config.Ingress.Enabled == nil { - cmd.PrintErrln("ingress configuration will be updated") - } - if vals.OptionalBool(currentConfig.LocalStorage.Enabled, false) && config.LocalStorage != nil && config.LocalStorage.Enabled == nil { - cmd.PrintErrln("local-storage configuration will be updated") - } - if vals.OptionalBool(currentConfig.LoadBalancer.Enabled, false) && config.LoadBalancer != nil && config.LoadBalancer.Enabled == nil { - cmd.PrintErrln("load-balancer configuration will be updated") - } - if vals.OptionalBool(currentConfig.MetricsServer.Enabled, false) && config.MetricsServer != nil && config.MetricsServer.Enabled == nil { - cmd.PrintErrln("metrics-server configuration will be updated") - } - request := apiv1.UpdateClusterConfigRequest{ Config: config, } @@ -101,171 +71,102 @@ func updateConfig(config *apiv1.UserFacingClusterConfig, arg string) error { switch key { case "network.enabled": - if config.Network == nil { - config.Network = &apiv1.NetworkConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for network.enabled: %w", err) } config.Network.Enabled = &v case "dns.enabled": - if config.DNS == nil { - config.DNS = &apiv1.DNSConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for dns.enabled: %w", err) } config.DNS.Enabled = &v case "dns.upstream-nameservers": - if config.DNS == nil { - config.DNS = &apiv1.DNSConfig{} - } - config.DNS.UpstreamNameservers = strings.FieldsFunc(value, func(r rune) bool { return unicode.IsSpace(r) || r == ',' }) + config.DNS.UpstreamNameservers = vals.Pointer(strings.FieldsFunc(value, func(r rune) bool { return unicode.IsSpace(r) || r == ',' })) case "dns.cluster-domain": - if config.DNS == nil { - config.DNS = &apiv1.DNSConfig{} - } - config.DNS.ClusterDomain = value + config.DNS.ClusterDomain = vals.Pointer(value) case "dns.service-ip": - if config.DNS == nil { - config.DNS = &apiv1.DNSConfig{} - } - config.DNS.ServiceIP = value + config.DNS.ServiceIP = vals.Pointer(value) case "gateway.enabled": - if config.Gateway == nil { - config.Gateway = &apiv1.GatewayConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for gateway.enabled: %w", err) } config.Gateway.Enabled = &v case "ingress.enabled": - if config.Ingress == nil { - config.Ingress = &apiv1.IngressConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for ingress.enabled: %w", err) } config.Ingress.Enabled = &v case "ingress.default-tls-secret": - if config.Ingress == nil { - config.Ingress = &apiv1.IngressConfig{} - } - 
config.Ingress.DefaultTLSSecret = value + config.Ingress.DefaultTLSSecret = vals.Pointer(value) case "ingress.enable-proxy-protocol": - if config.Ingress == nil { - config.Ingress = &apiv1.IngressConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for ingress.enable-proxy-protocol: %w", err) } config.Ingress.EnableProxyProtocol = &v case "local-storage.enabled": - if config.LocalStorage == nil { - config.LocalStorage = &apiv1.LocalStorageConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for local-storage.enabled: %w", err) } config.LocalStorage.Enabled = &v case "local-storage.local-path": - if config.LocalStorage == nil { - config.LocalStorage = &apiv1.LocalStorageConfig{} - } - config.LocalStorage.LocalPath = value + config.LocalStorage.LocalPath = vals.Pointer(value) case "local-storage.reclaim-policy": - if config.LocalStorage == nil { - config.LocalStorage = &apiv1.LocalStorageConfig{} - } - config.LocalStorage.ReclaimPolicy = value + config.LocalStorage.ReclaimPolicy = vals.Pointer(value) case "local-storage.set-default": - if config.LocalStorage == nil { - config.LocalStorage = &apiv1.LocalStorageConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for local-storage.set-default: %w", err) } config.LocalStorage.SetDefault = &v case "load-balancer.enabled": - if config.LoadBalancer == nil { - config.LoadBalancer = &apiv1.LoadBalancerConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for load-balancer.enabled: %w", err) } config.LoadBalancer.Enabled = &v case "load-balancer.cidrs": - if config.LoadBalancer == nil { - config.LoadBalancer = &apiv1.LoadBalancerConfig{} - } - config.LoadBalancer.CIDRs = strings.FieldsFunc(value, func(r rune) bool { return unicode.IsSpace(r) || r == ',' }) + config.LoadBalancer.CIDRs = vals.Pointer(strings.FieldsFunc(value, func(r rune) bool { return unicode.IsSpace(r) || r == ',' })) case "load-balancer.l2-mode": - if config.LoadBalancer == nil { - config.LoadBalancer = &apiv1.LoadBalancerConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for load-balancer.l2-mode: %w", err) } - config.LoadBalancer.L2Enabled = &v + config.LoadBalancer.L2Mode = &v case "load-balancer.l2-interfaces": - if config.LoadBalancer == nil { - config.LoadBalancer = &apiv1.LoadBalancerConfig{} - } - config.LoadBalancer.L2Interfaces = strings.FieldsFunc(value, func(r rune) bool { return unicode.IsSpace(r) || r == ',' }) + config.LoadBalancer.L2Interfaces = vals.Pointer(strings.FieldsFunc(value, func(r rune) bool { return unicode.IsSpace(r) || r == ',' })) case "load-balancer.bgp-mode": - if config.LoadBalancer == nil { - config.LoadBalancer = &apiv1.LoadBalancerConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for load-balancer.bgp-mode: %w", err) } - config.LoadBalancer.BGPEnabled = &v + config.LoadBalancer.BGPMode = &v case "load-balancer.bgp-local-asn": - if config.LoadBalancer == nil { - config.LoadBalancer = &apiv1.LoadBalancerConfig{} - } v, err := strconv.Atoi(value) if err != nil { return fmt.Errorf("invalid integer value for load-balancer.bgp-local-asn: %w", err) } - config.LoadBalancer.BGPLocalASN = v + config.LoadBalancer.BGPLocalASN = &v case "load-balancer.bgp-peer-address": - if config.LoadBalancer == nil { - config.LoadBalancer = 
&apiv1.LoadBalancerConfig{} - } - config.LoadBalancer.BGPPeerAddress = value + config.LoadBalancer.BGPPeerAddress = vals.Pointer(value) case "load-balancer.bgp-peer-port": - if config.LoadBalancer == nil { - config.LoadBalancer = &apiv1.LoadBalancerConfig{} - } v, err := strconv.Atoi(value) if err != nil { return fmt.Errorf("invalid integer value for load-balancer.bgp-peer-port: %w", err) } - config.LoadBalancer.BGPPeerPort = v + config.LoadBalancer.BGPPeerPort = &v case "load-balancer.bgp-peer-asn": - if config.LoadBalancer == nil { - config.LoadBalancer = &apiv1.LoadBalancerConfig{} - } v, err := strconv.Atoi(value) if err != nil { return fmt.Errorf("invalid integer value for load-balancer.bgp-peer-asn: %w", err) } - config.LoadBalancer.BGPPeerASN = v + config.LoadBalancer.BGPPeerASN = &v case "metrics-server.enabled": - if config.MetricsServer == nil { - config.MetricsServer = &apiv1.MetricsServerConfig{} - } v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("invalid boolean value for metrics-server.enabled: %w", err) diff --git a/src/k8s/cmd/k8s/testdata/bootstrap-config-full.yaml b/src/k8s/cmd/k8s/testdata/bootstrap-config-full.yaml new file mode 100644 index 000000000..dbbfdecd6 --- /dev/null +++ b/src/k8s/cmd/k8s/testdata/bootstrap-config-full.yaml @@ -0,0 +1,30 @@ +cluster-config: + network: + enabled: true + dns: + enabled: true + cluster-domain: cluster.local + ingress: + enabled: true + load-balancer: + enabled: true + cidrs: + - 10.0.0.0/24 + l2-mode: true + local-storage: + enabled: true + local-path: /storage/path + set-default: false + gateway: + enabled: true + metrics-server: + enabled: true +pod-cidr: 10.100.0.0/16 +service-cidr: 10.200.0.0/16 +disable-rbac: false +secure-port: 6443 +cloud-provider: external +k8s-dqlite-port: 9090 +datastore-type: k8s-dqlite +extra-sans: +- custom.kubernetes diff --git a/src/k8s/cmd/k8s/testdata/bootstrap-config-invalid-keys.yaml b/src/k8s/cmd/k8s/testdata/bootstrap-config-invalid-keys.yaml new file mode 100644 index 000000000..a98d86861 --- /dev/null +++ b/src/k8s/cmd/k8s/testdata/bootstrap-config-invalid-keys.yaml @@ -0,0 +1,2 @@ +cluster-cidr: "10.244.0.0/16" +disable-rbac: true diff --git a/src/k8s/cmd/k8s/testdata/bootstrap-config-some.yaml b/src/k8s/cmd/k8s/testdata/bootstrap-config-some.yaml new file mode 100644 index 000000000..e923f5084 --- /dev/null +++ b/src/k8s/cmd/k8s/testdata/bootstrap-config-some.yaml @@ -0,0 +1,2 @@ +pod-cidr: "10.100.0.0/16" +service-cidr: "10.152.200.0/24" diff --git a/src/k8s/cmd/k8sd/k8sd.go b/src/k8s/cmd/k8sd/k8sd.go index 0edd85072..86dce8184 100644 --- a/src/k8s/cmd/k8sd/k8sd.go +++ b/src/k8s/cmd/k8sd/k8sd.go @@ -2,7 +2,6 @@ package k8sd import ( cmdutil "github.com/canonical/k8s/cmd/util" - "github.com/canonical/k8s/pkg/config" "github.com/canonical/k8s/pkg/k8sd/app" "github.com/spf13/cobra" ) @@ -11,7 +10,6 @@ var rootCmdOpts struct { logDebug bool logVerbose bool stateDir string - port uint } func NewRootCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { @@ -20,11 +18,10 @@ func NewRootCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { Short: "Canonical Kubernetes orchestrator and clustering daemon", Run: func(cmd *cobra.Command, args []string) { app, err := app.New(cmd.Context(), app.Config{ - Debug: rootCmdOpts.logDebug, - Verbose: rootCmdOpts.logVerbose, - StateDir: rootCmdOpts.stateDir, - ListenPort: rootCmdOpts.port, - Snap: env.Snap, + Debug: rootCmdOpts.logDebug, + Verbose: rootCmdOpts.logVerbose, + StateDir: rootCmdOpts.stateDir, + Snap: env.Snap, }) if err != 
nil { cmd.PrintErrf("Error: Failed to initialize k8sd: %v", err) @@ -46,9 +43,11 @@ func NewRootCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { cmd.PersistentFlags().BoolVarP(&rootCmdOpts.logDebug, "debug", "d", false, "Show all debug messages") cmd.PersistentFlags().BoolVarP(&rootCmdOpts.logVerbose, "verbose", "v", true, "Show all information messages") - cmd.PersistentFlags().UintVar(&rootCmdOpts.port, "port", config.DefaultPort, "Port on which the REST API is exposed") cmd.PersistentFlags().StringVar(&rootCmdOpts.stateDir, "state-dir", "", "Directory with the dqlite datastore") + cmd.Flags().Uint("port", 0, "Default port for the HTTP API") + cmd.Flags().MarkDeprecated("port", "this flag does not have any effect, and will be removed in a future version") + cmd.AddCommand(newSqlCmd(env)) return cmd diff --git a/src/k8s/cmd/k8sd/k8sd_sql.go b/src/k8s/cmd/k8sd/k8sd_sql.go index 1bef94013..17fe431ff 100644 --- a/src/k8s/cmd/k8sd/k8sd_sql.go +++ b/src/k8s/cmd/k8sd/k8sd_sql.go @@ -13,10 +13,9 @@ func newSqlCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { Hidden: true, Args: cmdutil.ExactArgs(env, 1), Run: func(cmd *cobra.Command, args []string) { - cluster, err := app.New(cmd.Context(), app.Config{ - StateDir: rootCmdOpts.stateDir, - ListenPort: rootCmdOpts.port, - Snap: env.Snap, + app, err := app.New(cmd.Context(), app.Config{ + StateDir: rootCmdOpts.stateDir, + Snap: env.Snap, }) if err != nil { cmd.PrintErrf("Error: Failed to initialize k8sd app.\n\nThe error was: %v\n", err) @@ -24,7 +23,7 @@ func newSqlCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { return } - _, batch, err := cluster.MicroCluster.SQL(args[0]) + _, batch, err := app.MicroCluster().SQL(args[0]) if err != nil { cmd.PrintErrf("Error: Failed to execute the SQL query.\n\nThe error was: %v\n", err) env.Exit(1) diff --git a/src/k8s/go.mod b/src/k8s/go.mod index effc3bd48..1f3ddfdcb 100644 --- a/src/k8s/go.mod +++ b/src/k8s/go.mod @@ -4,8 +4,8 @@ go 1.21.4 require ( github.com/canonical/go-dqlite v1.21.0 - github.com/canonical/lxd v0.0.0-20231002162033-38796399c135 - github.com/canonical/microcluster v0.0.0-20240122235408-1525f8ea8d7a + github.com/canonical/lxd v0.0.0-20240124090112-6612e64073cb + github.com/canonical/microcluster v0.0.0-20240402231055-e8e97999a2eb github.com/moby/sys/mountinfo v0.7.1 github.com/onsi/gomega v1.30.0 github.com/pelletier/go-toml v1.9.5 @@ -30,7 +30,7 @@ require ( github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/Rican7/retry v0.3.1 // indirect - github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a // indirect + github.com/armon/go-proxyproto v0.1.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -74,10 +74,10 @@ require ( github.com/google/renameio v1.0.1 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/mux v1.8.0 // indirect - github.com/gorilla/schema v1.2.0 // indirect - github.com/gorilla/securecookie v1.1.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/schema v1.2.1 // indirect + github.com/gorilla/securecookie v1.1.2 // indirect + github.com/gorilla/websocket v1.5.1 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 
// indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -99,9 +99,9 @@ require ( github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/mattn/go-sqlite3 v1.14.18 // indirect + github.com/mattn/go-sqlite3 v1.14.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect @@ -118,7 +118,6 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/pborman/uuid v1.2.1 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/sftp v1.13.6 // indirect @@ -127,7 +126,7 @@ require ( github.com/prometheus/client_golang v1.17.0 // indirect github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/rogpeppe/fastuuid v1.2.0 // indirect @@ -142,16 +141,16 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - github.com/zitadel/oidc/v2 v2.11.0 // indirect + github.com/zitadel/oidc/v2 v2.12.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect go.opentelemetry.io/otel v1.23.1 // indirect go.opentelemetry.io/otel/metric v1.23.1 // indirect go.opentelemetry.io/otel/trace v1.23.1 // indirect - go.starlark.net v0.0.0-20230912135651-745481cf39ed // indirect + go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect golang.org/x/crypto v0.19.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect @@ -173,7 +172,7 @@ require ( k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/kubectl v0.29.0 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect oras.land/oras-go v1.2.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/src/k8s/go.sum b/src/k8s/go.sum index b359eb714..ac254ad56 100644 --- a/src/k8s/go.sum +++ b/src/k8s/go.sum @@ -72,8 +72,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod 
h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a h1:AP/vsCIvJZ129pdm9Ek7bH7yutN3hByqsMoNrWAxRQc= -github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= +github.com/armon/go-proxyproto v0.1.0 h1:TWWcSsjco7o2itn6r25/5AqKBiWmsiuzsUDLT/MTl7k= +github.com/armon/go-proxyproto v0.1.0/go.mod h1:Xj90dce2VKbHzRAeiVQAMBtj4M5oidoXJ8lmgyW21mw= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -95,10 +95,10 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXe github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/canonical/go-dqlite v1.21.0 h1:4gLDdV2GF+vg0yv9Ff+mfZZNQ1JGhnQ3GnS2GeZPHfA= github.com/canonical/go-dqlite v1.21.0/go.mod h1:Uvy943N8R4CFUAs59A1NVaziWY9nJ686lScY7ywurfg= -github.com/canonical/lxd v0.0.0-20231002162033-38796399c135 h1:4fZ5NMxbaVvm5VFR3fh3XzoCY58VQicSNKlSWqzDh2U= -github.com/canonical/lxd v0.0.0-20231002162033-38796399c135/go.mod h1:/lJ1suHbUSZn1VgwfWEcjkddHk4JeLTjtBwFPY0Eb7o= -github.com/canonical/microcluster v0.0.0-20240122235408-1525f8ea8d7a h1:LFRL3wLYCEp54NrPN5SLmGPPmih7VLZ8AWcBg4Khopc= -github.com/canonical/microcluster v0.0.0-20240122235408-1525f8ea8d7a/go.mod h1:pqKGCjymAfdqHmUC77AMSKq9sRTazucRyPLjFDI2muM= +github.com/canonical/lxd v0.0.0-20240124090112-6612e64073cb h1:TIfBKyW70Jy83stJqA0Oq41AxgQ/s+ZsL/xvFUmeo08= +github.com/canonical/lxd v0.0.0-20240124090112-6612e64073cb/go.mod h1:nFZ8lhA3iqXLNBlYWVSoN0zE/J2/XcOgZsUtGdi+GT0= +github.com/canonical/microcluster v0.0.0-20240402231055-e8e97999a2eb h1:xqHKXZPP4Rt7y3WX2K62hTkqbCpf6FYLhQ7z/g/uAw4= +github.com/canonical/microcluster v0.0.0-20240402231055-e8e97999a2eb/go.mod h1:n7VjS1GmcqepY04qahl82bIrqjNrYHUA0zhnYv8c1Q4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -308,7 +308,6 @@ github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -318,15 +317,15 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod 
h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc= -github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= -github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/schema v1.2.1 h1:tjDxcmdb+siIqkTNoV+qRH2mjYdr2hHe5MKXbp61ziM= +github.com/gorilla/schema v1.2.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= @@ -435,22 +434,22 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= -github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= +github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= -github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -505,8 +504,6 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= -github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -547,8 +544,8 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -559,8 +556,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/cors v1.10.0 h1:62NOS1h+r8p1mW6FM0FSB0exioXLhd/sh15KpjWBZ+8= -github.com/rs/cors v1.10.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= +github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= 
github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -628,8 +625,8 @@ github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMzt github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/zitadel/oidc/v2 v2.11.0 h1:Am4/yQr4iiM5bznRgF3FOp+wLdKx2gzSU73uyI9vvBE= -github.com/zitadel/oidc/v2 v2.11.0/go.mod h1:enFSVBQI6aE0TEB1ntjXs9r6O6DEosxX4uhEBLBVD8o= +github.com/zitadel/oidc/v2 v2.12.0 h1:4aMTAy99/4pqNwrawEyJqhRb3yY3PtcDxnoDSryhpn4= +github.com/zitadel/oidc/v2 v2.12.0/go.mod h1:LrRav74IiThHGapQgCHZOUNtnqJG0tcZKHro/91rtLw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -650,8 +647,8 @@ go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyel go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= -go.starlark.net v0.0.0-20230912135651-745481cf39ed h1:kNt8RXSIU6IRBO9MP3m+6q3WpyBHQQXqSktcyVKDPOQ= -go.starlark.net v0.0.0-20230912135651-745481cf39ed/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.starlark.net v0.0.0-20231121155337-90ade8b19d09 h1:hzy3LFnSN8kuQK8h9tHl4ndF6UruMj47OqwqsS+/Ai4= +go.starlark.net v0.0.0-20231121155337-90ade8b19d09/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= @@ -763,8 +760,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -833,11 +830,10 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= @@ -1093,8 +1089,8 @@ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/A k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/src/k8s/pkg/component/dns.go b/src/k8s/pkg/component/dns.go index 1d47e1d6f..6c7abd6eb 100644 --- a/src/k8s/pkg/component/dns.go +++ b/src/k8s/pkg/component/dns.go @@ -136,13 +136,13 @@ func ReconcileDNSComponent(ctx context.Context, s snap.Snap, alreadyEnabled *boo if vals.OptionalBool(requestEnabled, true) && vals.OptionalBool(alreadyEnabled, false) { // If already enabled, and request does not contain `enabled` key // or if already enabled and request contains `enabled=true` - dnsIP, clusterDomain, err := UpdateDNSComponent(ctx, s, true, clusterConfig.Kubelet.ClusterDomain, clusterConfig.Kubelet.ClusterDNS, clusterConfig.DNS.UpstreamNameservers) + dnsIP, clusterDomain, err := UpdateDNSComponent(ctx, s, true, clusterConfig.Kubelet.GetClusterDomain(), clusterConfig.Kubelet.GetClusterDNS(), clusterConfig.DNS.GetUpstreamNameservers()) if err != nil { return "", "", fmt.Errorf("failed to refresh dns: %w", err) } return dnsIP, clusterDomain, nil } else if vals.OptionalBool(requestEnabled, false) { - dnsIP, clusterDomain, err := UpdateDNSComponent(ctx, s, false, clusterConfig.Kubelet.ClusterDomain, clusterConfig.Kubelet.ClusterDNS, clusterConfig.DNS.UpstreamNameservers) + dnsIP, clusterDomain, err := UpdateDNSComponent(ctx, s, false, clusterConfig.Kubelet.GetClusterDomain(), clusterConfig.Kubelet.GetClusterDNS(), clusterConfig.DNS.GetUpstreamNameservers()) if err != nil { return "", "", fmt.Errorf("failed to enable dns: %w", err) } @@ -152,7 
+152,7 @@ func ReconcileDNSComponent(ctx context.Context, s snap.Snap, alreadyEnabled *boo if err != nil { return "", "", fmt.Errorf("failed to disable dns: %w", err) } - return clusterConfig.Kubelet.ClusterDNS, clusterConfig.Kubelet.ClusterDomain, nil + return clusterConfig.Kubelet.GetClusterDNS(), clusterConfig.Kubelet.GetClusterDomain(), nil } return "", "", nil } diff --git a/src/k8s/pkg/component/images.go b/src/k8s/pkg/component/images.go index 40745e765..a50559949 100644 --- a/src/k8s/pkg/component/images.go +++ b/src/k8s/pkg/component/images.go @@ -4,10 +4,10 @@ const dnsImageRepository = "ghcr.io/canonical/coredns" const dnsImageTag = "1.11.1-ck1" const ciliumAgentImageRepository = "ghcr.io/canonical/cilium" -const ciliumAgentImageTag = "1.14.5-ck1" +const ciliumAgentImageTag = "1.15.2-ck1" const ciliumOperatorImageRepository = "ghcr.io/canonical/cilium-operator" -const ciliumOperatorImageTag = "1.14.5-ck2" +const ciliumOperatorImageTag = "1.15.2-ck1" const storageImageRepository = "ghcr.io/canonical/rawfile-localpv" const storageImageTag = "0.8.0-ck5" diff --git a/src/k8s/pkg/component/ingress.go b/src/k8s/pkg/component/ingress.go index 89db6d0aa..7e26f8459 100644 --- a/src/k8s/pkg/component/ingress.go +++ b/src/k8s/pkg/component/ingress.go @@ -80,22 +80,16 @@ func DisableIngressComponent(s snap.Snap) error { } func ReconcileIngressComponent(ctx context.Context, s snap.Snap, alreadyEnabled *bool, requestEnabled *bool, clusterConfig types.ClusterConfig) error { - var enableProxyProtocol bool - - if clusterConfig.Ingress.EnableProxyProtocol != nil { - enableProxyProtocol = *clusterConfig.Ingress.EnableProxyProtocol - } - if vals.OptionalBool(requestEnabled, true) && vals.OptionalBool(alreadyEnabled, false) { // If already enabled, and request does not contain `enabled` key // or if already enabled and request contains `enabled=true` - err := UpdateIngressComponent(ctx, s, true, clusterConfig.Ingress.DefaultTLSSecret, enableProxyProtocol) + err := UpdateIngressComponent(ctx, s, true, clusterConfig.Ingress.GetDefaultTLSSecret(), clusterConfig.Ingress.GetEnableProxyProtocol()) if err != nil { return fmt.Errorf("failed to refresh ingress: %w", err) } return nil } else if vals.OptionalBool(requestEnabled, false) { - err := UpdateIngressComponent(ctx, s, false, clusterConfig.Ingress.DefaultTLSSecret, enableProxyProtocol) + err := UpdateIngressComponent(ctx, s, false, clusterConfig.Ingress.GetDefaultTLSSecret(), clusterConfig.Ingress.GetEnableProxyProtocol()) if err != nil { return fmt.Errorf("failed to enable ingress: %w", err) } diff --git a/src/k8s/pkg/component/loadbalancer.go b/src/k8s/pkg/component/loadbalancer.go index 404452664..d137686c6 100644 --- a/src/k8s/pkg/component/loadbalancer.go +++ b/src/k8s/pkg/component/loadbalancer.go @@ -11,7 +11,7 @@ import ( "github.com/canonical/k8s/pkg/utils/vals" ) -func UpdateLoadBalancerComponent(ctx context.Context, s snap.Snap, isRefresh bool, cidrs []string, l2Enabled bool, l2Interfaces []string, bgpEnabled bool, bgpLocalASN int, bgpPeerAddress string, bgpPeerASN int, bgpPeerPort int) error { +func UpdateLoadBalancerComponent(ctx context.Context, s snap.Snap, isRefresh bool, config types.LoadBalancer) error { manager, err := NewHelmClient(s, nil) if err != nil { return fmt.Errorf("failed to get component manager: %w", err) @@ -19,10 +19,10 @@ func UpdateLoadBalancerComponent(ctx context.Context, s snap.Snap, isRefresh boo networkValues := map[string]any{ "l2announcements": map[string]any{ - "enabled": l2Enabled, + "enabled": 
config.GetL2Mode(), }, "bgpControlPlane": map[string]any{ - "enabled": bgpEnabled, + "enabled": config.GetBGPMode(), }, "externalIPs": map[string]any{ "enabled": true, @@ -55,7 +55,7 @@ func UpdateLoadBalancerComponent(ctx context.Context, s snap.Snap, isRefresh boo "ciliuml2announcementpolicies": {}, "ciliumloadbalancerippools": {}, } - if bgpEnabled { + if config.GetBGPMode() { requiredCRDs["ciliumbgppeeringpolicies"] = struct{}{} } requiredCount := len(requiredCRDs) @@ -69,26 +69,26 @@ func UpdateLoadBalancerComponent(ctx context.Context, s snap.Snap, isRefresh boo formattedCidrs := []map[string]any{} - for _, cidr := range cidrs { + for _, cidr := range config.GetCIDRs() { formattedCidrs = append(formattedCidrs, map[string]any{"cidr": cidr}) } values := map[string]any{ "l2": map[string]any{ - "enabled": l2Enabled, - "interfaces": l2Interfaces, + "enabled": config.GetL2Mode(), + "interfaces": config.GetL2Interfaces(), }, "ipPool": map[string]any{ "cidrs": formattedCidrs, }, "bgp": map[string]any{ - "enabled": bgpEnabled, - "localASN": bgpLocalASN, + "enabled": config.GetBGPMode(), + "localASN": config.GetBGPLocalASN(), "neighbors": []map[string]any{ { - "peerAddress": bgpPeerAddress, - "peerASN": bgpPeerASN, - "peerPort": bgpPeerPort, + "peerAddress": config.GetBGPPeerAddress(), + "peerASN": config.GetBGPPeerASN(), + "peerPort": config.GetBGPPeerPort(), }, }, }, @@ -167,26 +167,16 @@ func DisableLoadBalancerComponent(s snap.Snap) error { } func ReconcileLoadBalancerComponent(ctx context.Context, s snap.Snap, alreadyEnabled *bool, requestEnabled *bool, clusterConfig types.ClusterConfig) error { - var bgpEnabled, l2Enabled bool - - if clusterConfig.LoadBalancer.BGPEnabled != nil { - bgpEnabled = *clusterConfig.LoadBalancer.BGPEnabled - } - - if clusterConfig.LoadBalancer.L2Enabled != nil { - l2Enabled = *clusterConfig.LoadBalancer.L2Enabled - } - if vals.OptionalBool(requestEnabled, true) && vals.OptionalBool(alreadyEnabled, false) { // If already enabled, and request does not contain `enabled` key // or if already enabled and request contains `enabled=true` - err := UpdateLoadBalancerComponent(ctx, s, true, clusterConfig.LoadBalancer.CIDRs, l2Enabled, clusterConfig.LoadBalancer.L2Interfaces, bgpEnabled, clusterConfig.LoadBalancer.BGPLocalASN, clusterConfig.LoadBalancer.BGPPeerAddress, clusterConfig.LoadBalancer.BGPPeerASN, clusterConfig.LoadBalancer.BGPPeerPort) + err := UpdateLoadBalancerComponent(ctx, s, true, clusterConfig.LoadBalancer) if err != nil { return fmt.Errorf("failed to refresh load-balancer: %w", err) } return nil } else if vals.OptionalBool(requestEnabled, false) { - err := UpdateLoadBalancerComponent(ctx, s, false, clusterConfig.LoadBalancer.CIDRs, l2Enabled, clusterConfig.LoadBalancer.L2Interfaces, bgpEnabled, clusterConfig.LoadBalancer.BGPLocalASN, clusterConfig.LoadBalancer.BGPPeerAddress, clusterConfig.LoadBalancer.BGPPeerASN, clusterConfig.LoadBalancer.BGPPeerPort) + err := UpdateLoadBalancerComponent(ctx, s, false, clusterConfig.LoadBalancer) if err != nil { return fmt.Errorf("failed to enable load-balancer: %w", err) } diff --git a/src/k8s/pkg/component/network.go b/src/k8s/pkg/component/network.go index 47b0dc21f..54d89bd16 100644 --- a/src/k8s/pkg/component/network.go +++ b/src/k8s/pkg/component/network.go @@ -70,6 +70,7 @@ func UpdateNetworkComponent(ctx context.Context, s snap.Snap, isRefresh bool, po "nodePort": map[string]any{ "enabled": true, }, + "disableEnvoyVersionCheck": true, } if s.Strict() { @@ -142,13 +143,13 @@ func ReconcileNetworkComponent(ctx 
context.Context, s snap.Snap, alreadyEnabled if vals.OptionalBool(requestEnabled, true) && vals.OptionalBool(alreadyEnabled, false) { // If already enabled, and request does not contain `enabled` key // or if already enabled and request contains `enabled=true` - err := UpdateNetworkComponent(ctx, s, true, clusterConfig.Network.PodCIDR) + err := UpdateNetworkComponent(ctx, s, true, clusterConfig.Network.GetPodCIDR()) if err != nil { return fmt.Errorf("failed to refresh network: %w", err) } return nil } else if vals.OptionalBool(requestEnabled, false) { - err := UpdateNetworkComponent(ctx, s, false, clusterConfig.Network.PodCIDR) + err := UpdateNetworkComponent(ctx, s, false, clusterConfig.Network.GetPodCIDR()) if err != nil { return fmt.Errorf("failed to enable network: %w", err) } diff --git a/src/k8s/pkg/component/storage.go b/src/k8s/pkg/component/storage.go index 46ce822ad..e256de097 100644 --- a/src/k8s/pkg/component/storage.go +++ b/src/k8s/pkg/component/storage.go @@ -9,7 +9,7 @@ import ( "github.com/canonical/k8s/pkg/utils/vals" ) -func UpdateStorageComponent(ctx context.Context, s snap.Snap, isRefresh bool, localPath string, reclaimPolicy string, setDefault bool) error { +func UpdateStorageComponent(ctx context.Context, s snap.Snap, isRefresh bool, config types.LocalStorage) error { manager, err := NewHelmClient(s, nil) if err != nil { return fmt.Errorf("failed to get component manager: %w", err) @@ -18,8 +18,8 @@ func UpdateStorageComponent(ctx context.Context, s snap.Snap, isRefresh bool, lo values := map[string]any{ "storageClass": map[string]any{ "enabled": true, - "isDefault": setDefault, - "reclaimPolicy": reclaimPolicy, + "isDefault": config.GetSetDefault(), + "reclaimPolicy": config.GetReclaimPolicy(), }, "serviceMonitor": map[string]any{ "enabled": false, @@ -37,7 +37,7 @@ func UpdateStorageComponent(ctx context.Context, s snap.Snap, isRefresh bool, lo "tag": storageImageTag, }, "storage": map[string]any{ - "path": localPath, + "path": config.GetLocalPath(), }, }, } @@ -69,22 +69,16 @@ func DisableStorageComponent(s snap.Snap) error { } func ReconcileLocalStorageComponent(ctx context.Context, s snap.Snap, alreadyEnabled *bool, requestEnabled *bool, clusterConfig types.ClusterConfig) error { - var setDefault bool - - if clusterConfig.LocalStorage.SetDefault != nil { - setDefault = *clusterConfig.LocalStorage.SetDefault - } - if vals.OptionalBool(requestEnabled, true) && vals.OptionalBool(alreadyEnabled, false) { // If already enabled, and request does not contain `enabled` key // or if already enabled and request contains `enabled=true` - err := UpdateStorageComponent(ctx, s, true, clusterConfig.LocalStorage.LocalPath, clusterConfig.LocalStorage.ReclaimPolicy, setDefault) + err := UpdateStorageComponent(ctx, s, true, clusterConfig.LocalStorage) if err != nil { return fmt.Errorf("failed to refresh local-storage: %w", err) } return nil } else if vals.OptionalBool(requestEnabled, false) { - err := UpdateStorageComponent(ctx, s, false, clusterConfig.LocalStorage.LocalPath, clusterConfig.LocalStorage.ReclaimPolicy, setDefault) + err := UpdateStorageComponent(ctx, s, false, clusterConfig.LocalStorage) if err != nil { return fmt.Errorf("failed to enable local-storage: %w", err) } diff --git a/src/k8s/pkg/k8sd/api/cluster.go b/src/k8s/pkg/k8sd/api/cluster.go index f81f3e5e6..61c6c8864 100644 --- a/src/k8s/pkg/k8sd/api/cluster.go +++ b/src/k8s/pkg/k8sd/api/cluster.go @@ -1,22 +1,56 @@ package api import ( + "fmt" "net/http" apiv1 "github.com/canonical/k8s/api/v1" 
"github.com/canonical/k8s/pkg/k8sd/api/impl" + "github.com/canonical/k8s/pkg/utils" + "github.com/canonical/k8s/pkg/utils/k8s" "github.com/canonical/lxd/lxd/response" "github.com/canonical/microcluster/state" ) -func getClusterStatus(s *state.State, r *http.Request) response.Response { - status, err := impl.GetClusterStatus(r.Context(), s) +func (e *Endpoints) getClusterStatus(s *state.State, r *http.Request) response.Response { + snap := e.provider.Snap() + + members, err := impl.GetClusterMembers(s.Context, s) + if err != nil { + return response.InternalError(fmt.Errorf("failed to get cluster members: %w", err)) + } + + config, err := utils.GetClusterConfig(s.Context, s) + if err != nil { + return response.InternalError(fmt.Errorf("failed to get user-facing cluster config: %w", err)) + } + + clusterConfig, err := utils.GetClusterConfig(s.Context, s) + if err != nil { + return response.InternalError(fmt.Errorf("failed to get cluster config: %w", err)) + } + datastoreConfig := apiv1.Datastore{ + Type: clusterConfig.Datastore.GetType(), + ExternalURL: clusterConfig.Datastore.GetExternalURL(), + } + + client, err := k8s.NewClient(snap.KubernetesRESTClientGetter("")) + if err != nil { + return response.InternalError(fmt.Errorf("failed to create k8s client: %w", err)) + } + + ready, err := client.HasReadyNodes(s.Context) if err != nil { - response.InternalError(err) + return response.InternalError(fmt.Errorf("failed to check if cluster has ready nodes: %w", err)) } result := apiv1.GetClusterStatusResponse{ - ClusterStatus: status, + ClusterStatus: apiv1.ClusterStatus{ + Ready: ready, + Members: members, + Config: config.ToUserFacing(), + Datastore: datastoreConfig, + }, } return response.SyncResponse(true, &result) diff --git a/src/k8s/pkg/k8sd/api/cluster_bootstrap.go b/src/k8s/pkg/k8sd/api/cluster_bootstrap.go index 8b2f12b40..fbb818c42 100644 --- a/src/k8s/pkg/k8sd/api/cluster_bootstrap.go +++ b/src/k8s/pkg/k8sd/api/cluster_bootstrap.go @@ -8,22 +8,19 @@ import ( apiv1 "github.com/canonical/k8s/api/v1" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/lxd/lxd/response" - "github.com/canonical/microcluster/microcluster" "github.com/canonical/microcluster/state" ) -func postClusterBootstrap(m *microcluster.MicroCluster, s *state.State, r *http.Request) response.Response { +func (e *Endpoints) postClusterBootstrap(s *state.State, r *http.Request) response.Response { req := apiv1.PostClusterBootstrapRequest{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return response.BadRequest(fmt.Errorf("failed to parse request: %w", err)) } - req.Config.SetDefaults() - //Convert Bootstrap config to map - config, err := req.Config.ToMap() + config, err := req.Config.ToMicrocluster() if err != nil { - return response.BadRequest(fmt.Errorf("failed to convert bootstrap config to map: %w", err)) + return response.BadRequest(fmt.Errorf("failed to prepare bootstrap config: %w", err)) } // Clean hostname @@ -33,14 +30,13 @@ func postClusterBootstrap(m *microcluster.MicroCluster, s *state.State, r *http. 
} // Check if the cluster is already bootstrapped - _, err = m.Status() + _, err = e.provider.MicroCluster().Status() if err == nil { return response.BadRequest(fmt.Errorf("cluster is already bootstrapped")) } // Bootstrap the cluster - // Timeout 0 should leave the timeout to context via the m.ctx - if err := m.NewCluster(hostname, req.Address, config, 0); err != nil { + if err := e.provider.MicroCluster().NewCluster(hostname, req.Address, config, 0); err != nil { // TODO move node cleanup here return response.BadRequest(fmt.Errorf("failed to bootstrap new cluster: %w", err)) } diff --git a/src/k8s/pkg/k8sd/api/cluster_config.go b/src/k8s/pkg/k8sd/api/cluster_config.go index 109a1ff57..7b57ecdf2 100644 --- a/src/k8s/pkg/k8sd/api/cluster_config.go +++ b/src/k8s/pkg/k8sd/api/cluster_config.go @@ -5,7 +5,6 @@ import ( "database/sql" "encoding/json" "fmt" - "net" "net/http" api "github.com/canonical/k8s/api/v1" @@ -13,7 +12,6 @@ import ( "github.com/canonical/k8s/pkg/component" "github.com/canonical/k8s/pkg/k8sd/database" "github.com/canonical/k8s/pkg/k8sd/types" - "github.com/canonical/k8s/pkg/snap" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/k8s" "github.com/canonical/k8s/pkg/utils/vals" @@ -21,121 +19,25 @@ import ( "github.com/canonical/microcluster/state" ) -func validateConfig(oldConfig types.ClusterConfig, newConfig types.ClusterConfig) error { - // If load-balancer, ingress or gateway gets enabled=true, - // the request should fail if network.enabled is not true - if !vals.OptionalBool(newConfig.Network.Enabled, false) { - if !vals.OptionalBool(oldConfig.Ingress.Enabled, false) && vals.OptionalBool(newConfig.Ingress.Enabled, false) { - return fmt.Errorf("ingress requires network to be enabled") - } - - if !vals.OptionalBool(oldConfig.Gateway.Enabled, false) && vals.OptionalBool(newConfig.Gateway.Enabled, false) { - return fmt.Errorf("gateway requires network to be enabled") - } - - if !vals.OptionalBool(oldConfig.LoadBalancer.Enabled, false) && vals.OptionalBool(newConfig.LoadBalancer.Enabled, false) { - return fmt.Errorf("load-balancer requires network to be enabled") - } - } - - // dns.service-ip should be in IP format and in service CIDR - if newConfig.Kubelet.ClusterDNS != "" && net.ParseIP(newConfig.Kubelet.ClusterDNS) == nil { - return fmt.Errorf("dns.service-ip must be in valid IP format") - } - - // dns.service-ip is not changable if already dns.enabled=true. 
- if vals.OptionalBool(newConfig.DNS.Enabled, false) && vals.OptionalBool(oldConfig.DNS.Enabled, false) { - if newConfig.Kubelet.ClusterDNS != oldConfig.Kubelet.ClusterDNS { - return fmt.Errorf("dns.service-ip can not be changed after dns is enabled") - } - } - - // load-balancer.bgp-mode=true should fail if any of the bgp config is empty - if vals.OptionalBool(newConfig.LoadBalancer.BGPEnabled, false) { - if newConfig.LoadBalancer.BGPLocalASN == 0 { - return fmt.Errorf("load-balancer.bgp-local-asn must be set when load-balancer.bgp-mode is enabled") - } - if newConfig.LoadBalancer.BGPPeerAddress == "" { - return fmt.Errorf("load-balancer.bgp-peer-address must be set when load-balancer.bgp-mode is enabled") - } - if newConfig.LoadBalancer.BGPPeerPort == 0 { - return fmt.Errorf("load-balancer.bgp-peer-port must be set when load-balancer.bgp-mode is enabled") - } - if newConfig.LoadBalancer.BGPPeerASN == 0 { - return fmt.Errorf("load-balancer.bgp-peer-asn must be set when load-balancer.bgp-mode is enabled") - } - } - - // local-storage.local-path should not be changable if local-storage.enabled=true - if vals.OptionalBool(newConfig.LocalStorage.Enabled, false) && vals.OptionalBool(oldConfig.LocalStorage.Enabled, false) { - if newConfig.LocalStorage.LocalPath != oldConfig.LocalStorage.LocalPath { - return fmt.Errorf("local-storage.local-path can not be changed after local-storage is enabled") - } - } - - // local-storage.reclaim-policy should be one of 3 values - switch newConfig.LocalStorage.ReclaimPolicy { - case "Retain", "Recycle", "Delete": - default: - return fmt.Errorf("local-storage.reclaim-policy must be one of: Retain, Recycle, Delete") - } - - // local-storage.reclaim-policy should not be changable if local-storage.enabled=true - if vals.OptionalBool(newConfig.LocalStorage.Enabled, false) && vals.OptionalBool(oldConfig.LocalStorage.Enabled, false) { - if newConfig.LocalStorage.ReclaimPolicy != oldConfig.LocalStorage.ReclaimPolicy { - return fmt.Errorf("local-storage.reclaim-policy can not be changed after local-storage is enabled") - } - } - - // network.enabled=false should not work before load-balancer, ingress and gateway is disabled - if vals.OptionalBool(oldConfig.Network.Enabled, false) && !vals.OptionalBool(newConfig.Network.Enabled, false) { - if vals.OptionalBool(newConfig.Ingress.Enabled, false) { - return fmt.Errorf("ingress must be disabled before network can be disabled") - } - if vals.OptionalBool(newConfig.Gateway.Enabled, false) { - return fmt.Errorf("gateway must be disabled before network can be disabled") - } - if vals.OptionalBool(newConfig.LoadBalancer.Enabled, false) { - return fmt.Errorf("load-balancer must be disabled before network can be disabled") - } - } - - return nil -} - -func putClusterConfig(s *state.State, r *http.Request) response.Response { +func (e *Endpoints) putClusterConfig(s *state.State, r *http.Request) response.Response { var req api.UpdateClusterConfigRequest - snap := snap.SnapFromContext(s.Context) + snap := e.provider.Snap() if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return response.BadRequest(fmt.Errorf("failed to decode request: %w", err)) } - var oldConfig types.ClusterConfig - - if err := s.Database.Transaction(r.Context(), func(ctx context.Context, tx *sql.Tx) error { - var err error - oldConfig, err = database.GetClusterConfig(ctx, tx) - if err != nil { - return fmt.Errorf("failed to read old cluster configuration: %w", err) - } - - return nil - }); err != nil { - return 
response.InternalError(fmt.Errorf("database transaction to read cluster configuration failed: %w", err)) - } - - newConfig, err := types.MergeClusterConfig(oldConfig, types.ClusterConfigFromUserFacing(&req.Config)) + oldConfig, err := utils.GetClusterConfig(r.Context(), s) if err != nil { - return response.InternalError(fmt.Errorf("failed to merge new cluster config: %w", err)) - } - - if err := validateConfig(oldConfig, newConfig); err != nil { - return response.InternalError(fmt.Errorf("config validation failed: %w", err)) + return response.InternalError(fmt.Errorf("failed to retrieve cluster configuration: %w", err)) } + requestedConfig := types.ClusterConfigFromUserFacing(req.Config) + var mergedConfig types.ClusterConfig if err := s.Database.Transaction(r.Context(), func(ctx context.Context, tx *sql.Tx) error { - if err := database.SetClusterConfig(ctx, tx, newConfig); err != nil { + var err error + mergedConfig, err = database.SetClusterConfig(ctx, tx, requestedConfig) + if err != nil { return fmt.Errorf("failed to update cluster configuration: %w", err) } @@ -144,38 +46,40 @@ func putClusterConfig(s *state.State, r *http.Request) response.Response { return response.InternalError(fmt.Errorf("database transaction to update cluster configuration failed: %w", err)) } - if req.Config.Network != nil { - err := component.ReconcileNetworkComponent(r.Context(), snap, oldConfig.Network.Enabled, req.Config.Network.Enabled, newConfig) - if err != nil { + if !requestedConfig.Network.Empty() { + if err := component.ReconcileNetworkComponent(r.Context(), snap, oldConfig.Network.Enabled, requestedConfig.Network.Enabled, mergedConfig); err != nil { return response.InternalError(fmt.Errorf("failed to reconcile network: %w", err)) } } - var dnsIP = newConfig.Kubelet.ClusterDNS - if req.Config.DNS != nil { - dnsIP, _, err = component.ReconcileDNSComponent(r.Context(), snap, oldConfig.DNS.Enabled, req.Config.DNS.Enabled, newConfig) + if !requestedConfig.DNS.Empty() { + dnsIP, _, err := component.ReconcileDNSComponent(r.Context(), snap, oldConfig.DNS.Enabled, requestedConfig.DNS.Enabled, mergedConfig) if err != nil { return response.InternalError(fmt.Errorf("failed to reconcile dns: %w", err)) } - if err := s.Database.Transaction(r.Context(), func(ctx context.Context, tx *sql.Tx) error { - if err := database.SetClusterConfig(ctx, tx, types.ClusterConfig{ - Kubelet: types.Kubelet{ - ClusterDNS: dnsIP, - }, + // If DNS IP is not empty, update cluster configuration + if dnsIP != "" { + if err := s.Database.Transaction(r.Context(), func(ctx context.Context, tx *sql.Tx) error { + mergedConfig, err = database.SetClusterConfig(ctx, tx, types.ClusterConfig{ + Kubelet: types.Kubelet{ + ClusterDNS: vals.Pointer(dnsIP), + }, + }) + if err != nil { + return fmt.Errorf("failed to update cluster configuration for dns=%s: %w", dnsIP, err) + } + return nil }); err != nil { - return fmt.Errorf("failed to update cluster configuration for dns=%s: %w", dnsIP, err) + return response.InternalError(fmt.Errorf("database transaction to update cluster configuration failed: %w", err)) } - return nil - }); err != nil { - return response.InternalError(fmt.Errorf("database transaction to update cluster configuration failed: %w", err)) } } - cmData := types.MapFromNodeConfig(types.NodeConfig{ - ClusterDNS: &dnsIP, - ClusterDomain: &newConfig.Kubelet.ClusterDomain, - }) + cmData, err := mergedConfig.Kubelet.ToConfigMap() + if err != nil { + return response.InternalError(fmt.Errorf("failed to format kubelet configmap data: %w", err)) + 
}
 	client, err := k8s.NewClient(snap.KubernetesRESTClientGetter(""))
 	if err != nil {
@@ -186,53 +90,47 @@ func putClusterConfig(s *state.State, r *http.Request) response.Response {
 		return response.InternalError(fmt.Errorf("failed to update node config: %w", err))
 	}
-	if req.Config.LocalStorage != nil {
-		err := component.ReconcileLocalStorageComponent(r.Context(), snap, oldConfig.LocalStorage.Enabled, req.Config.LocalStorage.Enabled, newConfig)
-		if err != nil {
+	if !requestedConfig.LocalStorage.Empty() {
+		if err := component.ReconcileLocalStorageComponent(r.Context(), snap, oldConfig.LocalStorage.Enabled, requestedConfig.LocalStorage.Enabled, mergedConfig); err != nil {
 			return response.InternalError(fmt.Errorf("failed to reconcile local-storage: %w", err))
 		}
 	}
-	if req.Config.Gateway != nil {
-		err := component.ReconcileGatewayComponent(r.Context(), snap, oldConfig.Gateway.Enabled, req.Config.Gateway.Enabled, newConfig)
-		if err != nil {
+	if !requestedConfig.Gateway.Empty() {
+		if err := component.ReconcileGatewayComponent(r.Context(), snap, oldConfig.Gateway.Enabled, requestedConfig.Gateway.Enabled, mergedConfig); err != nil {
 			return response.InternalError(fmt.Errorf("failed to reconcile gateway: %w", err))
 		}
 	}
-	if req.Config.Ingress != nil {
-		err := component.ReconcileIngressComponent(r.Context(), snap, oldConfig.Ingress.Enabled, req.Config.Ingress.Enabled, newConfig)
-		if err != nil {
+	if !requestedConfig.Ingress.Empty() {
+		if err := component.ReconcileIngressComponent(r.Context(), snap, oldConfig.Ingress.Enabled, requestedConfig.Ingress.Enabled, mergedConfig); err != nil {
 			return response.InternalError(fmt.Errorf("failed to reconcile ingress: %w", err))
 		}
 	}
-	if req.Config.LoadBalancer != nil {
-		err := component.ReconcileLoadBalancerComponent(r.Context(), snap, oldConfig.LoadBalancer.Enabled, req.Config.LoadBalancer.Enabled, newConfig)
-		if err != nil {
+	if !requestedConfig.LoadBalancer.Empty() {
+		if err := component.ReconcileLoadBalancerComponent(r.Context(), snap, oldConfig.LoadBalancer.Enabled, requestedConfig.LoadBalancer.Enabled, mergedConfig); err != nil {
 			return response.InternalError(fmt.Errorf("failed to reconcile load-balancer: %w", err))
 		}
 	}
-	if req.Config.MetricsServer != nil {
-		err := component.ReconcileMetricsServerComponent(r.Context(), snap, oldConfig.MetricsServer.Enabled, req.Config.MetricsServer.Enabled, newConfig)
-		if err != nil {
-			return response.InternalError(fmt.Errorf("failed to reconcile metrics-server: %w", err))
+	if !requestedConfig.MetricsServer.Empty() {
+		if err := component.ReconcileMetricsServerComponent(r.Context(), snap, oldConfig.MetricsServer.Enabled, requestedConfig.MetricsServer.Enabled, mergedConfig); err != nil {
+			return response.InternalError(fmt.Errorf("failed to reconcile metrics-server: %w", err))
 		}
 	}
 	return response.SyncResponse(true, &api.UpdateClusterConfigResponse{})
 }
-func getClusterConfig(s *state.State, r *http.Request) response.Response {
-	userFacing, err := utils.GetUserFacingClusterConfig(r.Context(), s)
+func (e *Endpoints) getClusterConfig(s *state.State, r *http.Request) response.Response {
+	config, err := utils.GetClusterConfig(r.Context(), s)
 	if err != nil {
-		return response.InternalError(fmt.Errorf("failed to get user-facing cluster config: %w", err))
+		return response.InternalError(fmt.Errorf("failed to retrieve cluster configuration: %w", err))
 	}
 	result := api.GetClusterConfigResponse{
-		Config: userFacing,
+		Config: config.ToUserFacing(),
 	}
-
 	return response.SyncResponse(true, &result)
 }
diff --git
a/src/k8s/pkg/k8sd/api/cluster_config_test.go b/src/k8s/pkg/k8sd/api/cluster_config_test.go deleted file mode 100644 index 3760af50f..000000000 --- a/src/k8s/pkg/k8sd/api/cluster_config_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package api - -import ( - "testing" - - "github.com/canonical/k8s/pkg/k8sd/types" - "github.com/canonical/k8s/pkg/utils/vals" - . "github.com/onsi/gomega" -) - -func DefaultConfig() types.ClusterConfig { - config := types.ClusterConfig{} - config.SetDefaults() - return config -} - -func TestValidateConfig(t *testing.T) { - tests := []struct { - name string - oldConfig types.ClusterConfig - newConfig types.ClusterConfig - expectedError string - }{ - { - name: "Disable network should not work before load-balancer is disabled", - oldConfig: types.ClusterConfig{ - Network: types.Network{ - Enabled: vals.Pointer(true), - }, - LoadBalancer: types.LoadBalancer{ - Enabled: vals.Pointer(true), - }, - }, - newConfig: types.ClusterConfig{ - Network: types.Network{ - Enabled: vals.Pointer(false), - }, - LoadBalancer: types.LoadBalancer{ - Enabled: vals.Pointer(true), - }, - }, - expectedError: "load-balancer must be disabled", - }, - { - name: "Disable network should work if load-balancer is also disabled in same request", - oldConfig: types.ClusterConfig{ - Network: types.Network{ - Enabled: vals.Pointer(true), - }, - LoadBalancer: types.LoadBalancer{ - Enabled: vals.Pointer(true), - }, - }, - newConfig: types.ClusterConfig{ - Network: types.Network{ - Enabled: vals.Pointer(false), - }, - LoadBalancer: types.LoadBalancer{ - Enabled: vals.Pointer(false), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - newConfig, err := types.MergeClusterConfig(DefaultConfig(), tt.newConfig) - g.Expect(err).To(BeNil()) - - err = validateConfig(tt.oldConfig, newConfig) - if tt.expectedError == "" { - g.Expect(err).To(BeNil()) - } else { - g.Expect(err).ToNot(BeNil()) - g.Expect(err.Error()).To(ContainSubstring(tt.expectedError)) - } - }) - } -} diff --git a/src/k8s/pkg/k8sd/api/cluster_join.go b/src/k8s/pkg/k8sd/api/cluster_join.go index 67c9f1106..f71de2174 100644 --- a/src/k8s/pkg/k8sd/api/cluster_join.go +++ b/src/k8s/pkg/k8sd/api/cluster_join.go @@ -4,17 +4,15 @@ import ( "encoding/json" "fmt" "net/http" - "time" apiv1 "github.com/canonical/k8s/api/v1" "github.com/canonical/k8s/pkg/k8sd/types" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/lxd/lxd/response" - "github.com/canonical/microcluster/microcluster" "github.com/canonical/microcluster/state" ) -func postClusterJoin(m *microcluster.MicroCluster, s *state.State, r *http.Request) response.Response { +func (e *Endpoints) postClusterJoin(s *state.State, r *http.Request) response.Response { req := apiv1.JoinClusterRequest{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return response.BadRequest(fmt.Errorf("failed to parse request: %w", err)) @@ -25,19 +23,17 @@ func postClusterJoin(m *microcluster.MicroCluster, s *state.State, r *http.Reque return response.BadRequest(fmt.Errorf("invalid hostname %q: %w", req.Name, err)) } - timeout := utils.TimeoutFromCtx(r.Context(), 30*time.Second) - internalToken := types.InternalWorkerNodeToken{} // Check if token is worker token if internalToken.Decode(req.Token) == nil { // valid worker node token - let's join the cluster // The validation of the token is done when fetching the cluster information. 
- if err := m.NewCluster(hostname, req.Address, map[string]string{"workerToken": req.Token}, timeout); err != nil { + if err := e.provider.MicroCluster().NewCluster(hostname, req.Address, map[string]string{"workerToken": req.Token}, 0); err != nil { return response.InternalError(fmt.Errorf("failed to join k8sd cluster as worker: %w", err)) } } else { // Is not a worker token. let microcluster check if it is a valid control-plane token. - if err := m.JoinCluster(hostname, req.Address, req.Token, nil, timeout); err != nil { + if err := e.provider.MicroCluster().JoinCluster(hostname, req.Address, req.Token, nil, 0); err != nil { return response.InternalError(fmt.Errorf("failed to join k8sd cluster as control plane: %w", err)) } } diff --git a/src/k8s/pkg/k8sd/api/cluster_remove.go b/src/k8s/pkg/k8sd/api/cluster_remove.go index b80671dc1..5dab12897 100644 --- a/src/k8s/pkg/k8sd/api/cluster_remove.go +++ b/src/k8s/pkg/k8sd/api/cluster_remove.go @@ -6,16 +6,14 @@ import ( "net/http" apiv1 "github.com/canonical/k8s/api/v1" - "github.com/canonical/k8s/pkg/snap" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/k8s" "github.com/canonical/lxd/lxd/response" - "github.com/canonical/microcluster/microcluster" "github.com/canonical/microcluster/state" ) -func postClusterRemove(m *microcluster.MicroCluster, s *state.State, r *http.Request) response.Response { - snap := snap.SnapFromContext(s.Context) +func (e *Endpoints) postClusterRemove(s *state.State, r *http.Request) response.Response { + snap := e.provider.Snap() req := apiv1.RemoveNodeRequest{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { @@ -29,7 +27,7 @@ func postClusterRemove(m *microcluster.MicroCluster, s *state.State, r *http.Req if isControlPlane { // Remove control plane via microcluster API. // The postRemove hook will take care of cleaning up kubernetes. - c, err := m.LocalClient() + c, err := e.provider.MicroCluster().LocalClient() if err != nil { return response.InternalError(fmt.Errorf("failed to create local client: %w", err)) } diff --git a/src/k8s/pkg/k8sd/api/cluster_tokens.go b/src/k8s/pkg/k8sd/api/cluster_tokens.go index 1281c36ed..d4a7f8783 100644 --- a/src/k8s/pkg/k8sd/api/cluster_tokens.go +++ b/src/k8s/pkg/k8sd/api/cluster_tokens.go @@ -16,7 +16,7 @@ import ( "github.com/canonical/microcluster/state" ) -func postClusterJoinTokens(m *microcluster.MicroCluster, s *state.State, r *http.Request) response.Response { +func (e *Endpoints) postClusterJoinTokens(s *state.State, r *http.Request) response.Response { req := apiv1.GetJoinTokenRequest{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return response.BadRequest(fmt.Errorf("failed to parse request: %w", err)) @@ -31,7 +31,7 @@ func postClusterJoinTokens(m *microcluster.MicroCluster, s *state.State, r *http if req.Worker { token, err = getOrCreateWorkerToken(s, hostname) } else { - token, err = getOrCreateJoinToken(m, hostname) + token, err = getOrCreateJoinToken(e.provider.MicroCluster(), hostname) } if err != nil { return response.InternalError(fmt.Errorf("failed to create token: %w", err)) diff --git a/src/k8s/pkg/k8sd/api/endpoints.go b/src/k8s/pkg/k8sd/api/endpoints.go index 882c45f99..77e0ae2b7 100644 --- a/src/k8s/pkg/k8sd/api/endpoints.go +++ b/src/k8s/pkg/k8sd/api/endpoints.go @@ -2,19 +2,27 @@ package api import ( - "github.com/canonical/microcluster/microcluster" "github.com/canonical/microcluster/rest" ) +type Endpoints struct { + provider Provider +} + +// New creates a new Endpoints instance. 
+func New(provider Provider) *Endpoints { + return &Endpoints{provider: provider} +} + // Endpoints returns the list of endpoints for a given microcluster app. -func Endpoints(app *microcluster.MicroCluster) []rest.Endpoint { +func (e *Endpoints) Endpoints() []rest.Endpoint { return []rest.Endpoint{ // Cluster status and bootstrap { Name: "Cluster", Path: "k8sd/cluster", - Get: rest.EndpointAction{Handler: getClusterStatus, AccessHandler: RestrictWorkers}, - Post: rest.EndpointAction{Handler: wrapHandlerWithMicroCluster(app, postClusterBootstrap)}, + Get: rest.EndpointAction{Handler: e.getClusterStatus, AccessHandler: e.restrictWorkers}, + Post: rest.EndpointAction{Handler: e.postClusterBootstrap}, AllowedBeforeInit: true, }, // Node @@ -22,19 +30,19 @@ func Endpoints(app *microcluster.MicroCluster) []rest.Endpoint { { Name: "NodeStatus", Path: "k8sd/node", - Get: rest.EndpointAction{Handler: getNodeStatus}, + Get: rest.EndpointAction{Handler: e.getNodeStatus}, }, // Clustering // Unified token endpoint for both, control-plane and worker-node. { Name: "ClusterJoinTokens", Path: "k8sd/cluster/tokens", - Post: rest.EndpointAction{Handler: wrapHandlerWithMicroCluster(app, postClusterJoinTokens), AccessHandler: RestrictWorkers}, + Post: rest.EndpointAction{Handler: e.postClusterJoinTokens, AccessHandler: e.restrictWorkers}, }, { Name: "ClusterJoin", Path: "k8sd/cluster/join", - Post: rest.EndpointAction{Handler: wrapHandlerWithMicroCluster(app, postClusterJoin)}, + Post: rest.EndpointAction{Handler: e.postClusterJoin}, // Joining a node is a bootstrapping action which needs to be available before k8sd is initialized. AllowedBeforeInit: true, }, @@ -42,7 +50,7 @@ func Endpoints(app *microcluster.MicroCluster) []rest.Endpoint { { Name: "ClusterRemove", Path: "k8sd/cluster/remove", - Post: rest.EndpointAction{Handler: wrapHandlerWithMicroCluster(app, postClusterRemove), AccessHandler: RestrictWorkers}, + Post: rest.EndpointAction{Handler: e.postClusterRemove, AccessHandler: e.restrictWorkers}, }, // Worker nodes { @@ -50,7 +58,7 @@ func Endpoints(app *microcluster.MicroCluster) []rest.Endpoint { Path: "k8sd/worker/info", // AllowUntrusted disabled the microcluster authorization check. Authorization is done via custom token. Post: rest.EndpointAction{ - Handler: postWorkerInfo, + Handler: e.postWorkerInfo, AllowUntrusted: true, AccessHandler: ValidateWorkerInfoAccessHandler("worker-name", "worker-token"), }, @@ -59,27 +67,27 @@ func Endpoints(app *microcluster.MicroCluster) []rest.Endpoint { { Name: "Kubeconfig", Path: "k8sd/kubeconfig", - Get: rest.EndpointAction{Handler: getKubeconfig, AccessHandler: RestrictWorkers}, + Get: rest.EndpointAction{Handler: e.getKubeconfig, AccessHandler: e.restrictWorkers}, }, // Get and modify the cluster configuration (e.g. 
to enable/disable functionalities) { Name: "ClusterConfig", Path: "k8sd/cluster/config", - Put: rest.EndpointAction{Handler: putClusterConfig, AccessHandler: RestrictWorkers}, - Get: rest.EndpointAction{Handler: getClusterConfig, AccessHandler: RestrictWorkers}, + Put: rest.EndpointAction{Handler: e.putClusterConfig, AccessHandler: e.restrictWorkers}, + Get: rest.EndpointAction{Handler: e.getClusterConfig, AccessHandler: e.restrictWorkers}, }, // Kubernetes auth tokens and token review webhook for kube-apiserver { Name: "KubernetesAuthTokens", Path: "kubernetes/auth/tokens", - Get: rest.EndpointAction{Handler: getKubernetesAuthTokens, AllowUntrusted: true}, - Post: rest.EndpointAction{Handler: postKubernetesAuthTokens}, - Delete: rest.EndpointAction{Handler: deleteKubernetesAuthTokens}, + Get: rest.EndpointAction{Handler: e.getKubernetesAuthTokens, AllowUntrusted: true}, + Post: rest.EndpointAction{Handler: e.postKubernetesAuthTokens}, + Delete: rest.EndpointAction{Handler: e.deleteKubernetesAuthTokens}, }, { Name: "KubernetesAuthWebhook", Path: "kubernetes/auth/webhook", - Post: rest.EndpointAction{Handler: postKubernetesAuthWebhook, AllowUntrusted: true}, + Post: rest.EndpointAction{Handler: e.postKubernetesAuthWebhook, AllowUntrusted: true}, }, } } diff --git a/src/k8s/pkg/k8sd/api/handler.go b/src/k8s/pkg/k8sd/api/handler.go deleted file mode 100644 index 2b6d7b00f..000000000 --- a/src/k8s/pkg/k8sd/api/handler.go +++ /dev/null @@ -1,22 +0,0 @@ -package api - -import ( - "net/http" - - "github.com/canonical/lxd/lxd/response" - "github.com/canonical/microcluster/microcluster" - "github.com/canonical/microcluster/state" -) - -// handler is the handler type for microcluster endpoints. -type handler func(*state.State, *http.Request) response.Response - -// handlerWithMicroCluster is the handler type for endpoints that also need access to the microcluster instance. -type handlerWithMicroCluster func(*microcluster.MicroCluster, *state.State, *http.Request) response.Response - -// wrapHandlerWithMicroCluster creates a microcluster handler from a handlerWithMicroCluster by capturing the microcluster instance. -func wrapHandlerWithMicroCluster(m *microcluster.MicroCluster, handler handlerWithMicroCluster) handler { - return func(s *state.State, r *http.Request) response.Response { - return handler(m, s, r) - } -} diff --git a/src/k8s/pkg/k8sd/api/impl/k8sd.go b/src/k8s/pkg/k8sd/api/impl/k8sd.go index cf50bc194..eb6999628 100644 --- a/src/k8s/pkg/k8sd/api/impl/k8sd.go +++ b/src/k8s/pkg/k8sd/api/impl/k8sd.go @@ -9,41 +9,9 @@ import ( "github.com/canonical/k8s/pkg/snap" snaputil "github.com/canonical/k8s/pkg/snap/util" "github.com/canonical/k8s/pkg/utils" - "github.com/canonical/k8s/pkg/utils/k8s" "github.com/canonical/microcluster/state" ) -// GetClusterStatus retrieves the status of the cluster, including information about its members. 
-func GetClusterStatus(ctx context.Context, s *state.State) (apiv1.ClusterStatus, error) { - snap := snap.SnapFromContext(s.Context) - - members, err := GetClusterMembers(ctx, s) - if err != nil { - return apiv1.ClusterStatus{}, fmt.Errorf("failed to get cluster members: %w", err) - } - - config, err := utils.GetUserFacingClusterConfig(ctx, s) - if err != nil { - return apiv1.ClusterStatus{}, fmt.Errorf("failed to get user-facing cluster config: %w", err) - } - - client, err := k8s.NewClient(snap.KubernetesRESTClientGetter("")) - if err != nil { - return apiv1.ClusterStatus{}, fmt.Errorf("failed to create k8s client: %w", err) - } - - ready, err := client.HasReadyNodes(ctx) - if err != nil { - return apiv1.ClusterStatus{}, fmt.Errorf("failed to check if cluster has ready nodes: %w", err) - } - - return apiv1.ClusterStatus{ - Ready: ready, - Members: members, - Config: config, - }, nil -} - // GetClusterMembers retrieves information about the members of the cluster. func GetClusterMembers(ctx context.Context, s *state.State) ([]apiv1.NodeStatus, error) { c, err := s.Leader() @@ -71,9 +39,7 @@ func GetClusterMembers(ctx context.Context, s *state.State) ([]apiv1.NodeStatus, // GetLocalNodeStatus retrieves the status of the local node, including its roles within the cluster. // Unlike "GetClusterMembers" this also works on a worker node. -func GetLocalNodeStatus(ctx context.Context, s *state.State) (apiv1.NodeStatus, error) { - snap := snap.SnapFromContext(s.Context) - +func GetLocalNodeStatus(ctx context.Context, s *state.State, snap snap.Snap) (apiv1.NodeStatus, error) { // Determine cluster role. clusterRole := apiv1.ClusterRoleUnknown isWorker, err := snaputil.IsWorker(snap) diff --git a/src/k8s/pkg/k8sd/api/kubeconfig.go b/src/k8s/pkg/k8sd/api/kubeconfig.go index 288f48e9e..9acd0d16b 100644 --- a/src/k8s/pkg/k8sd/api/kubeconfig.go +++ b/src/k8s/pkg/k8sd/api/kubeconfig.go @@ -13,28 +13,27 @@ import ( "github.com/canonical/microcluster/state" ) -func getKubeconfig(s *state.State, r *http.Request) response.Response { +func (e *Endpoints) getKubeconfig(s *state.State, r *http.Request) response.Response { req := apiv1.GetKubeConfigRequest{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return response.BadRequest(fmt.Errorf("failed to parse request: %w", err)) } // Fetch pieces needed to render an admin kubeconfig: ca, server, token - clusterCfg, err := utils.GetClusterConfig(r.Context(), s) + config, err := utils.GetClusterConfig(r.Context(), s) if err != nil { return response.InternalError(fmt.Errorf("failed to retrieve cluster config: %w", err)) } - ca := clusterCfg.Certificates.CACert server := req.Server if req.Server == "" { - server = fmt.Sprintf("%s:%d", s.Address().Hostname(), clusterCfg.APIServer.SecurePort) + server = fmt.Sprintf("%s:%d", s.Address().Hostname(), config.APIServer.GetSecurePort()) } token, err := impl.GetOrCreateAuthToken(s.Context, s, "kubernetes-admin", []string{"system:masters"}) if err != nil { return response.InternalError(fmt.Errorf("failed to get admin token: %w", err)) } - kubeconfig, err := setup.KubeconfigString(token, server, ca) + kubeconfig, err := setup.KubeconfigString(token, server, config.Certificates.GetCACert()) if err != nil { return response.InternalError(fmt.Errorf("failed to get kubeconfig: %w", err)) } diff --git a/src/k8s/pkg/k8sd/api/kubernetes_auth_tokens.go b/src/k8s/pkg/k8sd/api/kubernetes_auth_tokens.go index 4fa9fff36..2b9b437a3 100644 --- a/src/k8s/pkg/k8sd/api/kubernetes_auth_tokens.go +++ 
b/src/k8s/pkg/k8sd/api/kubernetes_auth_tokens.go @@ -16,12 +16,12 @@ import ( "github.com/canonical/microcluster/state" ) -func getKubernetesAuthTokens(state *state.State, r *http.Request) response.Response { +func (e *Endpoints) getKubernetesAuthTokens(s *state.State, r *http.Request) response.Response { token := r.Header.Get("token") var username string var groups []string - if err := state.Database.Transaction(r.Context(), func(ctx context.Context, tx *sql.Tx) error { + if err := s.Database.Transaction(r.Context(), func(ctx context.Context, tx *sql.Tx) error { var err error username, groups, err = database.CheckToken(ctx, tx, token) return err @@ -32,13 +32,13 @@ func getKubernetesAuthTokens(state *state.State, r *http.Request) response.Respo return response.SyncResponse(true, apiv1.CheckKubernetesAuthTokenResponse{Username: username, Groups: groups}) } -func postKubernetesAuthTokens(state *state.State, r *http.Request) response.Response { +func (e *Endpoints) postKubernetesAuthTokens(s *state.State, r *http.Request) response.Response { request := apiv1.GenerateKubernetesAuthTokenRequest{} if err := json.NewDecoder(r.Body).Decode(&request); err != nil { return response.BadRequest(fmt.Errorf("failed to parse request: %w", err)) } - token, err := impl.GetOrCreateAuthToken(r.Context(), state, request.Username, request.Groups) + token, err := impl.GetOrCreateAuthToken(r.Context(), s, request.Username, request.Groups) if err != nil { return response.InternalError(err) } @@ -46,13 +46,13 @@ func postKubernetesAuthTokens(state *state.State, r *http.Request) response.Resp return response.SyncResponse(true, apiv1.CreateKubernetesAuthTokenResponse{Token: token}) } -func deleteKubernetesAuthTokens(state *state.State, r *http.Request) response.Response { +func (e *Endpoints) deleteKubernetesAuthTokens(s *state.State, r *http.Request) response.Response { request := apiv1.RevokeKubernetesAuthTokenRequest{} if err := json.NewDecoder(r.Body).Decode(&request); err != nil { return response.BadRequest(fmt.Errorf("failed to parse request: %w", err)) } - err := impl.RevokeAuthToken(r.Context(), state, request.Token) + err := impl.RevokeAuthToken(r.Context(), s, request.Token) if err != nil { return response.InternalError(fmt.Errorf("failed to revoke auth token: %w", err)) } @@ -62,7 +62,7 @@ func deleteKubernetesAuthTokens(state *state.State, r *http.Request) response.Re // postKubernetesAuthWebhook is used by kube-apiserver to handle TokenReview objects. // Note that we do not use the normal response.SyncResponse here, because it breaks the response format that kube-apiserver expects. 
-func postKubernetesAuthWebhook(state *state.State, r *http.Request) response.Response { +func (e *Endpoints) postKubernetesAuthWebhook(s *state.State, r *http.Request) response.Response { review := apiv1.TokenReview{ APIVersion: "authentication.k8s.io/v1", Kind: "TokenReview", @@ -96,7 +96,7 @@ func postKubernetesAuthWebhook(state *state.State, r *http.Request) response.Res // check token var username string var groups []string - if err := state.Database.Transaction(r.Context(), func(ctx context.Context, tx *sql.Tx) error { + if err := s.Database.Transaction(r.Context(), func(ctx context.Context, tx *sql.Tx) error { var err error username, groups, err = database.CheckToken(ctx, tx, review.Spec.Token) return err diff --git a/src/k8s/pkg/k8sd/api/node.go b/src/k8s/pkg/k8sd/api/node.go index d67fd0751..bc9664f5f 100644 --- a/src/k8s/pkg/k8sd/api/node.go +++ b/src/k8s/pkg/k8sd/api/node.go @@ -9,8 +9,10 @@ import ( "github.com/canonical/microcluster/state" ) -func getNodeStatus(s *state.State, r *http.Request) response.Response { - status, err := impl.GetLocalNodeStatus(r.Context(), s) +func (e *Endpoints) getNodeStatus(s *state.State, r *http.Request) response.Response { + snap := e.provider.Snap() + + status, err := impl.GetLocalNodeStatus(r.Context(), s, snap) if err != nil { response.InternalError(err) } diff --git a/src/k8s/pkg/k8sd/api/provider.go b/src/k8s/pkg/k8sd/api/provider.go new file mode 100644 index 000000000..69862efe7 --- /dev/null +++ b/src/k8s/pkg/k8sd/api/provider.go @@ -0,0 +1,12 @@ +package api + +import ( + "github.com/canonical/k8s/pkg/snap" + "github.com/canonical/microcluster/microcluster" +) + +// Provider is an interface for state that the API endpoints need access to. +type Provider interface { + MicroCluster() *microcluster.MicroCluster + Snap() snap.Snap +} diff --git a/src/k8s/pkg/k8sd/api/worker.go b/src/k8s/pkg/k8sd/api/worker.go index 41561f022..16d68a5c3 100644 --- a/src/k8s/pkg/k8sd/api/worker.go +++ b/src/k8s/pkg/k8sd/api/worker.go @@ -11,14 +11,15 @@ import ( apiv1 "github.com/canonical/k8s/api/v1" "github.com/canonical/k8s/pkg/k8sd/database" "github.com/canonical/k8s/pkg/k8sd/pki" - "github.com/canonical/k8s/pkg/snap" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/k8s" "github.com/canonical/lxd/lxd/response" "github.com/canonical/microcluster/state" ) -func postWorkerInfo(s *state.State, r *http.Request) response.Response { +func (e *Endpoints) postWorkerInfo(s *state.State, r *http.Request) response.Response { + snap := e.provider.Snap() + req := apiv1.WorkerNodeInfoRequest{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return response.BadRequest(fmt.Errorf("failed to parse request: %w", err)) @@ -37,18 +38,20 @@ func postWorkerInfo(s *state.State, r *http.Request) response.Response { } certificates := pki.NewControlPlanePKI(pki.ControlPlanePKIOpts{Years: 10}) - certificates.CACert = cfg.Certificates.CACert - certificates.CAKey = cfg.Certificates.CAKey + certificates.CACert = cfg.Certificates.GetCACert() + certificates.CAKey = cfg.Certificates.GetCAKey() workerCertificates, err := certificates.CompleteWorkerNodePKI(workerName, nodeIP, 2048) if err != nil { return response.InternalError(fmt.Errorf("failed to generate worker PKI: %w", err)) } - snap := snap.SnapFromContext(s.Context) client, err := k8s.NewClient(snap.KubernetesRESTClientGetter("")) if err != nil { return response.InternalError(fmt.Errorf("failed to create kubernetes client: %w", err)) } + if err := client.WaitApiServerReady(s.Context); err != nil 
{ + return response.InternalError(fmt.Errorf("kube-apiserver did not become ready in time: %w", err)) + } servers, err := client.GetKubeAPIServerEndpoints(s.Context) if err != nil { return response.InternalError(fmt.Errorf("failed to retrieve list of known kube-apiserver endpoints: %w", err)) @@ -92,14 +95,14 @@ func postWorkerInfo(s *state.State, r *http.Request) response.Response { } return response.SyncResponse(true, &apiv1.WorkerNodeInfoResponse{ - CA: cfg.Certificates.CACert, + CA: cfg.Certificates.GetCACert(), APIServers: servers, - PodCIDR: cfg.Network.PodCIDR, + PodCIDR: cfg.Network.GetPodCIDR(), KubeletToken: kubeletToken, KubeProxyToken: proxyToken, - ClusterDomain: cfg.Kubelet.ClusterDomain, - ClusterDNS: cfg.Kubelet.ClusterDNS, - CloudProvider: cfg.Kubelet.CloudProvider, + ClusterDomain: cfg.Kubelet.GetClusterDomain(), + ClusterDNS: cfg.Kubelet.GetClusterDNS(), + CloudProvider: cfg.Kubelet.GetCloudProvider(), KubeletCert: workerCertificates.KubeletCert, KubeletKey: workerCertificates.KubeletKey, }) diff --git a/src/k8s/pkg/k8sd/api/worker_access_handler.go b/src/k8s/pkg/k8sd/api/worker_access_handler.go index b9ab0cfc9..fad76a43f 100644 --- a/src/k8s/pkg/k8sd/api/worker_access_handler.go +++ b/src/k8s/pkg/k8sd/api/worker_access_handler.go @@ -7,15 +7,14 @@ import ( "net/http" "github.com/canonical/k8s/pkg/k8sd/database" - "github.com/canonical/k8s/pkg/snap" snaputil "github.com/canonical/k8s/pkg/snap/util" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/lxd/lxd/response" "github.com/canonical/microcluster/state" ) -func RestrictWorkers(s *state.State, r *http.Request) response.Response { - snap := snap.SnapFromContext(s.Context) +func (e *Endpoints) restrictWorkers(s *state.State, r *http.Request) response.Response { + snap := e.provider.Snap() isWorker, err := snaputil.IsWorker(snap) if err != nil { diff --git a/src/k8s/pkg/k8sd/app/app.go b/src/k8s/pkg/k8sd/app/app.go index 849d5e629..6d230e28e 100644 --- a/src/k8s/pkg/k8sd/app/app.go +++ b/src/k8s/pkg/k8sd/app/app.go @@ -3,12 +3,17 @@ package app import ( "context" "fmt" + "sync" + "time" "github.com/canonical/k8s/pkg/k8sd/api" + "github.com/canonical/k8s/pkg/k8sd/controllers" "github.com/canonical/k8s/pkg/k8sd/database" "github.com/canonical/k8s/pkg/snap" + "github.com/canonical/k8s/pkg/utils/k8s" "github.com/canonical/microcluster/config" "github.com/canonical/microcluster/microcluster" + "github.com/canonical/microcluster/state" ) // Config defines configuration for the k8sd app. @@ -17,8 +22,6 @@ type Config struct { Debug bool // Verbose increases log message verbosity. Verbose bool - // ListenPort is the network port to bind for connections. - ListenPort uint // StateDir is the local directory to store the state of the node. StateDir string // Snap is the snap instance to use. @@ -27,30 +30,52 @@ type Config struct { // App is the k8sd microcluster instance. type App struct { - MicroCluster *microcluster.MicroCluster + microCluster *microcluster.MicroCluster + snap snap.Snap + + // readyWg is used to denote that the microcluster node is now running + readyWg sync.WaitGroup + + nodeConfigController *controllers.NodeConfigurationController + controlPlaneConfigController *controllers.ControlPlaneConfigurationController } // New initializes a new microcluster instance from configuration. 
func New(ctx context.Context, cfg Config) (*App, error) { - ctx = snap.ContextWithSnap(ctx, cfg.Snap) - if cfg.StateDir == "" { cfg.StateDir = cfg.Snap.K8sdStateDir() } cluster, err := microcluster.App(ctx, microcluster.Args{ - Verbose: cfg.Verbose, - Debug: cfg.Debug, - ListenPort: fmt.Sprintf("%d", cfg.ListenPort), - StateDir: cfg.StateDir, + Verbose: cfg.Verbose, + Debug: cfg.Debug, + StateDir: cfg.StateDir, }) if err != nil { return nil, fmt.Errorf("failed to create microcluster app: %w", err) } - return &App{ - MicroCluster: cluster, - }, nil + app := &App{ + microCluster: cluster, + snap: cfg.Snap, + } + app.readyWg.Add(1) + + app.nodeConfigController = controllers.NewNodeConfigurationController( + cfg.Snap, + app.readyWg.Wait, + func() (*k8s.Client, error) { + return k8s.NewClient(cfg.Snap.KubernetesNodeRESTClientGetter("kube-system")) + }, + ) + + app.controlPlaneConfigController = controllers.NewControlPlaneConfigurationController( + cfg.Snap, + app.readyWg.Wait, + time.NewTicker(10*time.Second).C, + ) + + return app, nil } // Run starts the microcluster node and waits until it terminates. @@ -58,14 +83,14 @@ func New(ctx context.Context, cfg Config) (*App, error) { func (a *App) Run(customHooks *config.Hooks) error { // TODO: consider improving API for overriding hooks. hooks := &config.Hooks{ - OnBootstrap: onBootstrap, - PostJoin: onPostJoin, - PreRemove: onPreRemove, - OnStart: onStart, + PostBootstrap: a.onBootstrap, + PostJoin: a.onPostJoin, + PreRemove: a.onPreRemove, + OnStart: a.onStart, } if customHooks != nil { - if customHooks.OnBootstrap != nil { - hooks.OnBootstrap = customHooks.OnBootstrap + if customHooks.PostBootstrap != nil { + hooks.PostBootstrap = customHooks.PostBootstrap } if customHooks.PostJoin != nil { hooks.PostJoin = customHooks.PostJoin @@ -77,9 +102,24 @@ func (a *App) Run(customHooks *config.Hooks) error { hooks.OnStart = customHooks.OnStart } } - err := a.MicroCluster.Start(api.Endpoints(a.MicroCluster), database.SchemaExtensions, hooks) + err := a.microCluster.Start(api.New(a).Endpoints(), database.SchemaExtensions, hooks) if err != nil { return fmt.Errorf("failed to run microcluster: %w", err) } return nil } + +func (a *App) markNodeReady(ctx context.Context, s *state.State) { + for { + if s.Database.IsOpen() { + a.readyWg.Done() + return + } + + select { + case <-ctx.Done(): + return + case <-time.After(3 * time.Second): + } + } +} diff --git a/src/k8s/pkg/k8sd/app/cluster_util.go b/src/k8s/pkg/k8sd/app/cluster_util.go index f623aebcb..e8526d953 100644 --- a/src/k8s/pkg/k8sd/app/cluster_util.go +++ b/src/k8s/pkg/k8sd/app/cluster_util.go @@ -45,10 +45,10 @@ func setupControlPlaneServices(snap snap.Snap, s *state.State, cfg types.Cluster if err := setup.Containerd(snap, nil); err != nil { return fmt.Errorf("failed to configure containerd: %w", err) } - if err := setup.KubeletControlPlane(snap, s.Name(), nodeIP, cfg.Kubelet.ClusterDNS, cfg.Kubelet.ClusterDomain, cfg.Kubelet.CloudProvider); err != nil { + if err := setup.KubeletControlPlane(snap, s.Name(), nodeIP, cfg.Kubelet.GetClusterDNS(), cfg.Kubelet.GetClusterDomain(), cfg.Kubelet.GetCloudProvider()); err != nil { return fmt.Errorf("failed to configure kubelet: %w", err) } - if err := setup.KubeProxy(s.Context, snap, s.Name(), cfg.Network.PodCIDR); err != nil { + if err := setup.KubeProxy(s.Context, snap, s.Name(), cfg.Network.GetPodCIDR()); err != nil { return fmt.Errorf("failed to configure kube-proxy: %w", err) } if err := setup.KubeControllerManager(snap); err != nil { @@ -57,7 +57,7 @@ func 
setupControlPlaneServices(snap snap.Snap, s *state.State, cfg types.Cluster if err := setup.KubeScheduler(snap); err != nil { return fmt.Errorf("failed to configure kube-scheduler: %w", err) } - if err := setup.KubeAPIServer(snap, cfg.Network.ServiceCIDR, s.Address().Path("1.0", "kubernetes", "auth", "webhook").String(), true, cfg.APIServer.Datastore, cfg.APIServer.DatastoreURL, cfg.APIServer.AuthorizationMode); err != nil { + if err := setup.KubeAPIServer(snap, cfg.Network.GetServiceCIDR(), s.Address().Path("1.0", "kubernetes", "auth", "webhook").String(), true, cfg.Datastore, cfg.APIServer.GetAuthorizationMode()); err != nil { return fmt.Errorf("failed to configure kube-apiserver: %w", err) } return nil diff --git a/src/k8s/pkg/k8sd/app/hooks_bootstrap.go b/src/k8s/pkg/k8sd/app/hooks_bootstrap.go index 1adb0fec1..58431b7f2 100644 --- a/src/k8s/pkg/k8sd/app/hooks_bootstrap.go +++ b/src/k8s/pkg/k8sd/app/hooks_bootstrap.go @@ -10,7 +10,6 @@ import ( "net" "net/http" "path" - "time" apiv1 "github.com/canonical/k8s/api/v1" "github.com/canonical/k8s/pkg/component" @@ -18,7 +17,6 @@ import ( "github.com/canonical/k8s/pkg/k8sd/pki" "github.com/canonical/k8s/pkg/k8sd/setup" "github.com/canonical/k8s/pkg/k8sd/types" - "github.com/canonical/k8s/pkg/snap" snaputil "github.com/canonical/k8s/pkg/snap/util" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/k8s" @@ -28,15 +26,17 @@ import ( // onBootstrap is called after we bootstrap the first cluster node. // onBootstrap configures local services then writes the cluster config on the database. -func onBootstrap(s *state.State, initConfig map[string]string) error { +func (a *App) onBootstrap(s *state.State, initConfig map[string]string) error { if workerToken, ok := initConfig["workerToken"]; ok { - return onBootstrapWorkerNode(s, workerToken) + return a.onBootstrapWorkerNode(s, workerToken) } - return onBootstrapControlPlane(s, initConfig) + return a.onBootstrapControlPlane(s, initConfig) } -func onBootstrapWorkerNode(s *state.State, encodedToken string) error { +func (a *App) onBootstrapWorkerNode(s *state.State, encodedToken string) error { + snap := a.Snap() + token := &types.InternalWorkerNodeToken{} if err := token.Decode(encodedToken); err != nil { return fmt.Errorf("failed to parse worker token: %w", err) @@ -51,10 +51,8 @@ func onBootstrapWorkerNode(s *state.State, encodedToken string) error { } // TODO(neoaggelos): figure out how to use the microcluster client instead - // create an HTTP client that ignores https httpClient := &http.Client{ - Timeout: 10 * time.Second, Transport: &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, @@ -93,8 +91,6 @@ func onBootstrapWorkerNode(s *state.State, encodedToken string) error { } response := wrappedResp.Metadata - snap := snap.SnapFromContext(s.Context) - // Create directories if err := setup.EnsureAllDirectories(snap); err != nil { return fmt.Errorf("failed to create directories: %w", err) @@ -148,15 +144,18 @@ func onBootstrapWorkerNode(s *state.State, encodedToken string) error { return nil } -func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error { - snap := snap.SnapFromContext(s.Context) +func (a *App) onBootstrapControlPlane(s *state.State, initConfig map[string]string) error { + snap := a.Snap() - bootstrapConfig, err := apiv1.BootstrapConfigFromMap(initConfig) + bootstrapConfig, err := apiv1.BootstrapConfigFromMicrocluster(initConfig) if err != nil { return fmt.Errorf("failed to unmarshal bootstrap config: %w", err) } - cfg 
:= types.ClusterConfigFromBootstrapConfig(bootstrapConfig) + cfg, err := types.ClusterConfigFromBootstrapConfig(bootstrapConfig) + if err != nil { + return fmt.Errorf("invalid bootstrap config: %w", err) + } cfg.SetDefaults() if err := cfg.Validate(); err != nil { return fmt.Errorf("invalid cluster configuration: %w", err) @@ -173,12 +172,12 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error } // cfg.Network.ServiceCIDR may be "IPv4CIDR[,IPv6CIDR]". get the first ip from CIDR(s). - serviceIPs, err := utils.GetKubernetesServiceIPsFromServiceCIDRs(cfg.Network.ServiceCIDR) + serviceIPs, err := utils.GetKubernetesServiceIPsFromServiceCIDRs(cfg.Network.GetServiceCIDR()) if err != nil { - return fmt.Errorf("failed to get IP address(es) from ServiceCIDR %q: %w", cfg.Network.ServiceCIDR, err) + return fmt.Errorf("failed to get IP address(es) from ServiceCIDR %q: %w", cfg.Network.GetServiceCIDR(), err) } - switch cfg.APIServer.Datastore { + switch cfg.Datastore.GetType() { case "k8s-dqlite": certificates := pki.NewK8sDqlitePKI(pki.K8sDqlitePKIOpts{ Hostname: s.Name(), @@ -193,14 +192,13 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error return fmt.Errorf("failed to write k8s-dqlite certificates: %w", err) } - cfg.Certificates.K8sDqliteCert = certificates.K8sDqliteCert - cfg.Certificates.K8sDqliteKey = certificates.K8sDqliteKey - + cfg.Datastore.K8sDqliteCert = vals.Pointer(certificates.K8sDqliteCert) + cfg.Datastore.K8sDqliteKey = vals.Pointer(certificates.K8sDqliteKey) case "external": certificates := &pki.ExternalDatastorePKI{ - DatastoreCACert: cfg.Certificates.DatastoreCACert, - DatastoreClientCert: cfg.Certificates.DatastoreClientCert, - DatastoreClientKey: cfg.Certificates.DatastoreClientKey, + DatastoreCACert: cfg.Datastore.GetExternalCACert(), + DatastoreClientCert: cfg.Datastore.GetExternalClientCert(), + DatastoreClientKey: cfg.Datastore.GetExternalClientKey(), } if err := certificates.CheckCertificates(); err != nil { return fmt.Errorf("failed to initialize external datastore certificates: %w", err) @@ -209,7 +207,7 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error return fmt.Errorf("failed to write external datastore certificates: %w", err) } default: - return fmt.Errorf("unsupported datastore %s, must be one of %v", cfg.APIServer.Datastore, setup.SupportedDatastores) + return fmt.Errorf("unsupported datastore %s, must be one of %v", cfg.Datastore.GetType(), setup.SupportedDatastores) } extraIPs, extraNames := utils.SplitIPAndDNSSANs(bootstrapConfig.ExtraSANs) @@ -233,28 +231,28 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error } // Add certificates to the cluster config - cfg.Certificates.CACert = certificates.CACert - cfg.Certificates.CAKey = certificates.CAKey - cfg.Certificates.FrontProxyCACert = certificates.FrontProxyCACert - cfg.Certificates.FrontProxyCAKey = certificates.FrontProxyCAKey - cfg.Certificates.APIServerKubeletClientCert = certificates.APIServerKubeletClientCert - cfg.Certificates.APIServerKubeletClientKey = certificates.APIServerKubeletClientKey - cfg.APIServer.ServiceAccountKey = certificates.ServiceAccountKey + cfg.Certificates.CACert = vals.Pointer(certificates.CACert) + cfg.Certificates.CAKey = vals.Pointer(certificates.CAKey) + cfg.Certificates.FrontProxyCACert = vals.Pointer(certificates.FrontProxyCACert) + cfg.Certificates.FrontProxyCAKey = vals.Pointer(certificates.FrontProxyCAKey) + cfg.Certificates.APIServerKubeletClientCert 
= vals.Pointer(certificates.APIServerKubeletClientCert) + cfg.Certificates.APIServerKubeletClientKey = vals.Pointer(certificates.APIServerKubeletClientKey) + cfg.Certificates.ServiceAccountKey = vals.Pointer(certificates.ServiceAccountKey) // Generate kubeconfigs - if err := setupKubeconfigs(s, snap.KubernetesConfigDir(), cfg.APIServer.SecurePort, cfg.Certificates.CACert); err != nil { + if err := setupKubeconfigs(s, snap.KubernetesConfigDir(), cfg.APIServer.GetSecurePort(), cfg.Certificates.GetCACert()); err != nil { return fmt.Errorf("failed to generate kubeconfigs: %w", err) } // Configure datastore - switch cfg.APIServer.Datastore { + switch cfg.Datastore.GetType() { case "k8s-dqlite": - if err := setup.K8sDqlite(snap, fmt.Sprintf("%s:%d", nodeIP.String(), cfg.K8sDqlite.Port), nil); err != nil { + if err := setup.K8sDqlite(snap, fmt.Sprintf("%s:%d", nodeIP.String(), cfg.Datastore.GetK8sDqlitePort()), nil); err != nil { return fmt.Errorf("failed to configure k8s-dqlite: %w", err) } case "external": default: - return fmt.Errorf("unsupported datastore %s, must be one of %v", cfg.APIServer.Datastore, setup.SupportedDatastores) + return fmt.Errorf("unsupported datastore %s, must be one of %v", cfg.Datastore.GetType(), setup.SupportedDatastores) } // Configure services @@ -264,7 +262,7 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error // Write cluster configuration to dqlite if err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { - if err := database.SetClusterConfig(ctx, tx, cfg); err != nil { + if _, err := database.SetClusterConfig(ctx, tx, cfg); err != nil { return fmt.Errorf("failed to write cluster configuration: %w", err) } return nil @@ -273,7 +271,7 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error } // Start services - if err := startControlPlaneServices(s.Context, snap, cfg.APIServer.Datastore); err != nil { + if err := startControlPlaneServices(s.Context, snap, cfg.Datastore.GetType()); err != nil { return fmt.Errorf("failed to start services: %w", err) } @@ -282,37 +280,41 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error return fmt.Errorf("kube-apiserver did not become ready in time: %w", err) } - if cfg.Network.Enabled != nil { - err := component.ReconcileNetworkComponent(s.Context, snap, vals.Pointer(false), cfg.Network.Enabled, cfg) - if err != nil { + // TODO(neoaggelos): the work below should be a POST /cluster/config + + if cfg.Network.GetEnabled() { + if err := component.ReconcileNetworkComponent(s.Context, snap, vals.Pointer(false), vals.Pointer(true), cfg); err != nil { return fmt.Errorf("failed to reconcile network: %w", err) } } - var dnsIP = cfg.Kubelet.ClusterDNS - if cfg.DNS.Enabled != nil { - dnsIP, _, err = component.ReconcileDNSComponent(s.Context, snap, vals.Pointer(false), cfg.DNS.Enabled, cfg) + if cfg.DNS.GetEnabled() { + dnsIP, _, err := component.ReconcileDNSComponent(s.Context, snap, vals.Pointer(false), vals.Pointer(true), cfg) if err != nil { return fmt.Errorf("failed to reconcile dns: %w", err) } - if err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { - if err := database.SetClusterConfig(ctx, tx, types.ClusterConfig{ - Kubelet: types.Kubelet{ - ClusterDNS: dnsIP, - }, + + // If DNS IP is not empty, update cluster configuration + if dnsIP != "" { + if err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { + if cfg, err = database.SetClusterConfig(ctx, 
tx, types.ClusterConfig{ + Kubelet: types.Kubelet{ + ClusterDNS: vals.Pointer(dnsIP), + }, + }); err != nil { + return fmt.Errorf("failed to update cluster configuration for dns=%s: %w", dnsIP, err) + } + return nil }); err != nil { - return fmt.Errorf("failed to update cluster configuration for dns=%s: %w", dnsIP, err) + return fmt.Errorf("database transaction to update cluster configuration failed: %w", err) } - return nil - }); err != nil { - return fmt.Errorf("database transaction to update cluster configuration failed: %w", err) } } - cmData := types.MapFromNodeConfig(types.NodeConfig{ - ClusterDNS: &dnsIP, - ClusterDomain: &cfg.Kubelet.ClusterDomain, - }) + cmData, err := cfg.Kubelet.ToConfigMap() + if err != nil { + return fmt.Errorf("failed to format kubelet configmap data: %w", err) + } client, err := k8s.NewClient(snap.KubernetesRESTClientGetter("")) if err != nil { @@ -323,37 +325,32 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error return fmt.Errorf("failed to update node configs: %w", err) } - if cfg.LocalStorage.Enabled != nil { - err := component.ReconcileLocalStorageComponent(s.Context, snap, vals.Pointer(false), cfg.LocalStorage.Enabled, cfg) - if err != nil { + if cfg.LocalStorage.GetEnabled() { + if err := component.ReconcileLocalStorageComponent(s.Context, snap, vals.Pointer(false), vals.Pointer(true), cfg); err != nil { return fmt.Errorf("failed to reconcile local-storage: %w", err) } } - if cfg.Gateway.Enabled != nil { - err := component.ReconcileGatewayComponent(s.Context, snap, vals.Pointer(false), cfg.Gateway.Enabled, cfg) - if err != nil { + if cfg.Gateway.GetEnabled() { + if err := component.ReconcileGatewayComponent(s.Context, snap, vals.Pointer(false), vals.Pointer(true), cfg); err != nil { return fmt.Errorf("failed to reconcile gateway: %w", err) } } - if cfg.Ingress.Enabled != nil { - err := component.ReconcileIngressComponent(s.Context, snap, vals.Pointer(false), cfg.Ingress.Enabled, cfg) - if err != nil { + if cfg.Ingress.GetEnabled() { + if err := component.ReconcileIngressComponent(s.Context, snap, vals.Pointer(false), vals.Pointer(true), cfg); err != nil { return fmt.Errorf("failed to reconcile ingress: %w", err) } } - if cfg.LoadBalancer.Enabled != nil { - err := component.ReconcileLoadBalancerComponent(s.Context, snap, vals.Pointer(false), cfg.LoadBalancer.Enabled, cfg) - if err != nil { + if cfg.LoadBalancer.GetEnabled() { + if err := component.ReconcileLoadBalancerComponent(s.Context, snap, vals.Pointer(false), vals.Pointer(true), cfg); err != nil { return fmt.Errorf("failed to reconcile load-balancer: %w", err) } } - if cfg.MetricsServer.Enabled != nil { - err := component.ReconcileMetricsServerComponent(s.Context, snap, vals.Pointer(false), cfg.MetricsServer.Enabled, cfg) - if err != nil { + if cfg.MetricsServer.GetEnabled() { + if err := component.ReconcileMetricsServerComponent(s.Context, snap, vals.Pointer(false), vals.Pointer(true), cfg); err != nil { return fmt.Errorf("failed to reconcile metrics-server: %w", err) } } diff --git a/src/k8s/pkg/k8sd/app/hooks_join.go b/src/k8s/pkg/k8sd/app/hooks_join.go index 1333ff5cf..0a49d1d5a 100644 --- a/src/k8s/pkg/k8sd/app/hooks_join.go +++ b/src/k8s/pkg/k8sd/app/hooks_join.go @@ -6,7 +6,6 @@ import ( "github.com/canonical/k8s/pkg/k8sd/pki" "github.com/canonical/k8s/pkg/k8sd/setup" - "github.com/canonical/k8s/pkg/snap" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/k8s" "github.com/canonical/microcluster/state" @@ -14,8 +13,8 @@ import ( // 
onPostJoin is called when a control plane node joins the cluster. // onPostJoin retrieves the cluster config from the database and configures local services. -func onPostJoin(s *state.State, initConfig map[string]string) error { - snap := snap.SnapFromContext(s.Context) +func (a *App) onPostJoin(s *state.State, initConfig map[string]string) error { + snap := a.Snap() cfg, err := utils.GetClusterConfig(s.Context, s) if err != nil { @@ -32,21 +31,20 @@ func onPostJoin(s *state.State, initConfig map[string]string) error { } // cfg.Network.ServiceCIDR may be "IPv4CIDR[,IPv6CIDR]". get the first ip from CIDR(s). - serviceIPs, err := utils.GetKubernetesServiceIPsFromServiceCIDRs(cfg.Network.ServiceCIDR) + serviceIPs, err := utils.GetKubernetesServiceIPsFromServiceCIDRs(cfg.Network.GetServiceCIDR()) if err != nil { - return fmt.Errorf("failed to get IP address(es) from ServiceCIDR %q: %w", cfg.Network.ServiceCIDR, err) + return fmt.Errorf("failed to get IP address(es) from ServiceCIDR %q: %w", cfg.Network.GetServiceCIDR(), err) } - // Certificates - switch cfg.APIServer.Datastore { + switch cfg.Datastore.GetType() { case "k8s-dqlite": certificates := pki.NewK8sDqlitePKI(pki.K8sDqlitePKIOpts{ Hostname: s.Name(), IPSANs: []net.IP{{127, 0, 0, 1}}, Years: 20, }) - certificates.K8sDqliteCert = cfg.Certificates.K8sDqliteCert - certificates.K8sDqliteKey = cfg.Certificates.K8sDqliteKey + certificates.K8sDqliteCert = cfg.Datastore.GetK8sDqliteCert() + certificates.K8sDqliteKey = cfg.Datastore.GetK8sDqliteKey() if err := certificates.CompleteCertificates(); err != nil { return fmt.Errorf("failed to initialize k8s-dqlite certificates: %w", err) } @@ -55,9 +53,9 @@ func onPostJoin(s *state.State, initConfig map[string]string) error { } case "external": certificates := &pki.ExternalDatastorePKI{ - DatastoreCACert: cfg.Certificates.DatastoreCACert, - DatastoreClientCert: cfg.Certificates.DatastoreClientCert, - DatastoreClientKey: cfg.Certificates.DatastoreClientKey, + DatastoreCACert: cfg.Datastore.GetExternalCACert(), + DatastoreClientCert: cfg.Datastore.GetExternalClientCert(), + DatastoreClientKey: cfg.Datastore.GetExternalClientKey(), } if err := certificates.CheckCertificates(); err != nil { return fmt.Errorf("failed to initialize external datastore certificates: %w", err) @@ -66,7 +64,7 @@ func onPostJoin(s *state.State, initConfig map[string]string) error { return fmt.Errorf("failed to write external datastore certificates: %w", err) } default: - return fmt.Errorf("unsupported datastore %s, must be one of %v", cfg.APIServer.Datastore, setup.SupportedDatastores) + return fmt.Errorf("unsupported datastore %s, must be one of %v", cfg.Datastore.GetType(), setup.SupportedDatastores) } // Certificates @@ -78,13 +76,13 @@ func onPostJoin(s *state.State, initConfig map[string]string) error { }) // load existing certificates, then generate certificates for the node - certificates.CACert = cfg.Certificates.CACert - certificates.CAKey = cfg.Certificates.CAKey - certificates.FrontProxyCACert = cfg.Certificates.FrontProxyCACert - certificates.FrontProxyCAKey = cfg.Certificates.FrontProxyCAKey - certificates.APIServerKubeletClientCert = cfg.Certificates.APIServerKubeletClientCert - certificates.APIServerKubeletClientKey = cfg.Certificates.APIServerKubeletClientKey - certificates.ServiceAccountKey = cfg.APIServer.ServiceAccountKey + certificates.CACert = cfg.Certificates.GetCACert() + certificates.CAKey = cfg.Certificates.GetCAKey() + certificates.FrontProxyCACert = cfg.Certificates.GetFrontProxyCACert() + 
certificates.FrontProxyCAKey = cfg.Certificates.GetFrontProxyCAKey() + certificates.APIServerKubeletClientCert = cfg.Certificates.GetAPIServerKubeletClientCert() + certificates.APIServerKubeletClientKey = cfg.Certificates.GetAPIServerKubeletClientKey() + certificates.ServiceAccountKey = cfg.Certificates.GetServiceAccountKey() if err := certificates.CompleteCertificates(); err != nil { return fmt.Errorf("failed to initialize control plane certificates: %w", err) @@ -93,12 +91,12 @@ func onPostJoin(s *state.State, initConfig map[string]string) error { return fmt.Errorf("failed to write control plane certificates: %w", err) } - if err := setupKubeconfigs(s, snap.KubernetesConfigDir(), cfg.APIServer.SecurePort, cfg.Certificates.CACert); err != nil { + if err := setupKubeconfigs(s, snap.KubernetesConfigDir(), cfg.APIServer.GetSecurePort(), cfg.Certificates.GetCACert()); err != nil { return fmt.Errorf("failed to generate kubeconfigs: %w", err) } // Configure datastore - switch cfg.APIServer.Datastore { + switch cfg.Datastore.GetType() { case "k8s-dqlite": leader, err := s.Leader() if err != nil { @@ -110,16 +108,16 @@ func onPostJoin(s *state.State, initConfig map[string]string) error { } cluster := make([]string, len(members)) for _, member := range members { - cluster = append(cluster, fmt.Sprintf("%s:%d", member.Address.Addr(), cfg.K8sDqlite.Port)) + cluster = append(cluster, fmt.Sprintf("%s:%d", member.Address.Addr(), cfg.Datastore.GetK8sDqlitePort())) } - address := fmt.Sprintf("%s:%d", nodeIP.String(), cfg.K8sDqlite.Port) + address := fmt.Sprintf("%s:%d", nodeIP.String(), cfg.Datastore.GetK8sDqlitePort()) if err := setup.K8sDqlite(snap, address, cluster); err != nil { return fmt.Errorf("failed to configure k8s-dqlite with address=%s cluster=%v: %w", address, cluster, err) } case "external": default: - return fmt.Errorf("unsupported datastore %s, must be one of %v", cfg.APIServer.Datastore, setup.SupportedDatastores) + return fmt.Errorf("unsupported datastore %s, must be one of %v", cfg.Datastore.GetType(), setup.SupportedDatastores) } // Configure services @@ -128,7 +126,7 @@ func onPostJoin(s *state.State, initConfig map[string]string) error { } // Start services - if err := startControlPlaneServices(s.Context, snap, cfg.APIServer.Datastore); err != nil { + if err := startControlPlaneServices(s.Context, snap, cfg.Datastore.GetType()); err != nil { return fmt.Errorf("failed to start services: %w", err) } @@ -140,8 +138,8 @@ func onPostJoin(s *state.State, initConfig map[string]string) error { return nil } -func onPreRemove(s *state.State, force bool) error { - snap := snap.SnapFromContext(s.Context) +func (a *App) onPreRemove(s *state.State, force bool) error { + snap := a.Snap() cfg, err := utils.GetClusterConfig(s.Context, s) if err != nil { @@ -149,14 +147,14 @@ func onPreRemove(s *state.State, force bool) error { } // configure datastore - switch cfg.APIServer.Datastore { + switch cfg.Datastore.GetType() { case "k8s-dqlite": client, err := snap.K8sDqliteClient(s.Context) if err != nil { return fmt.Errorf("failed to create k8s-dqlite client: %w", err) } - nodeAddress := net.JoinHostPort(s.Address().Hostname(), fmt.Sprintf("%d", cfg.K8sDqlite.Port)) + nodeAddress := net.JoinHostPort(s.Address().Hostname(), fmt.Sprintf("%d", cfg.Datastore.GetK8sDqlitePort())) if err := client.RemoveNodeByAddress(s.Context, nodeAddress); err != nil { return fmt.Errorf("failed to remove node with address %s from k8s-dqlite cluster: %w", nodeAddress, err) } diff --git a/src/k8s/pkg/k8sd/app/hooks_start.go 
b/src/k8s/pkg/k8sd/app/hooks_start.go index 2f3599e08..93b00d33a 100644 --- a/src/k8s/pkg/k8sd/app/hooks_start.go +++ b/src/k8s/pkg/k8sd/app/hooks_start.go @@ -2,33 +2,27 @@ package app import ( "context" - "time" - "github.com/canonical/k8s/pkg/k8sd/controllers" - "github.com/canonical/k8s/pkg/snap" - "github.com/canonical/k8s/pkg/utils/k8s" + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/utils" "github.com/canonical/microcluster/state" ) -func onStart(s *state.State) error { - snap := snap.SnapFromContext(s.Context) +func (a *App) onStart(s *state.State) error { + // start a goroutine to mark the node as running + go a.markNodeReady(s.Context, s) - configController := controllers.NewNodeConfigurationController(snap, func(ctx context.Context) *k8s.Client { - for { - select { - case <-ctx.Done(): - return nil - case <-time.After(3 * time.Second): - } + // start node config controller + if a.nodeConfigController != nil { + go a.nodeConfigController.Run(s.Context) + } - client, err := k8s.NewClient(snap.KubernetesNodeRESTClientGetter("kube-system")) - if err != nil { - continue - } - return client - } - }) - go configController.Run(s.Context) + // start control plane config controller + if a.controlPlaneConfigController != nil { + go a.controlPlaneConfigController.Run(s.Context, func(ctx context.Context) (types.ClusterConfig, error) { + return utils.GetClusterConfig(ctx, s) + }) + } return nil } diff --git a/src/k8s/pkg/k8sd/app/provider.go b/src/k8s/pkg/k8sd/app/provider.go new file mode 100644 index 000000000..ff366a080 --- /dev/null +++ b/src/k8s/pkg/k8sd/app/provider.go @@ -0,0 +1,18 @@ +package app + +import ( + "github.com/canonical/k8s/pkg/k8sd/api" + "github.com/canonical/k8s/pkg/snap" + "github.com/canonical/microcluster/microcluster" +) + +func (a *App) MicroCluster() *microcluster.MicroCluster { + return a.microCluster +} + +func (a *App) Snap() snap.Snap { + return a.snap +} + +// Ensure App implements api.Provider +var _ api.Provider = &App{} diff --git a/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go b/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go new file mode 100644 index 000000000..968ed3a5e --- /dev/null +++ b/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go @@ -0,0 +1,107 @@ +package controllers + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/canonical/k8s/pkg/k8sd/pki" + "github.com/canonical/k8s/pkg/k8sd/setup" + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/snap" + snaputil "github.com/canonical/k8s/pkg/snap/util" +) + +// ControlPlaneConfigurationController watches for changes in the cluster configuration +// and applies them on the control plane services. +type ControlPlaneConfigurationController struct { + snap snap.Snap + waitReady func() + triggerCh <-chan time.Time +} + +// NewControlPlaneConfigurationController creates a new controller. +// triggerCh is typically a `time.NewTicker().C` +func NewControlPlaneConfigurationController(snap snap.Snap, waitReady func(), triggerCh <-chan time.Time) *ControlPlaneConfigurationController { + return &ControlPlaneConfigurationController{ + snap: snap, + waitReady: waitReady, + triggerCh: triggerCh, + } +} + +// Run starts the controller. +// Run accepts a context to manage the lifecycle of the controller. +// Run accepts a function that retrieves the current cluster configuration. 
+// Run will loop every time the trigger channel is triggered. +func (c *ControlPlaneConfigurationController) Run(ctx context.Context, getClusterConfig func(context.Context) (types.ClusterConfig, error)) { + c.waitReady() + + for { + select { + case <-ctx.Done(): + return + case <-c.triggerCh: + } + + if isWorker, err := snaputil.IsWorker(c.snap); err != nil { + log.Println(fmt.Errorf("failed to check if this is a worker node: %w", err)) + continue + } else if isWorker { + log.Println("Stopping control plane controller as this is a worker node") + return + } + + config, err := getClusterConfig(ctx) + if err != nil { + log.Println(fmt.Errorf("failed to reconcile control plane configuration: %w", err)) + continue + } + + if err := c.reconcile(ctx, config); err != nil { + log.Println(fmt.Errorf("failed to reconcile control plane configuration: %w", err)) + } + } +} + +func (c *ControlPlaneConfigurationController) reconcile(ctx context.Context, config types.ClusterConfig) error { + // kube-apiserver: external datastore + switch config.Datastore.GetType() { + case "external": + // certificates + if err := setup.EnsureExtDatastorePKI(c.snap, &pki.ExternalDatastorePKI{ + DatastoreCACert: config.Datastore.GetExternalCACert(), + DatastoreClientCert: config.Datastore.GetExternalClientCert(), + DatastoreClientKey: config.Datastore.GetExternalClientKey(), + }); err != nil { + return fmt.Errorf("failed to reconcile external datastore certificates: %w", err) + } + + // kube-apiserver arguments + updateArgs, deleteArgs := config.Datastore.ToKubeAPIServerArguments(c.snap) + if mustRestart, err := snaputil.UpdateServiceArguments(c.snap, "kube-apiserver", updateArgs, deleteArgs); err != nil { + return fmt.Errorf("failed to update kube-apiserver datastore arguments: %w", err) + } else if mustRestart { + if err := c.snap.RestartService(ctx, "kube-apiserver"); err != nil { + return fmt.Errorf("failed to restart kube-apiserver to apply configuration: %w", err) + } + } + } + + // kube-controller-manager: cloud-provider + if v := config.Kubelet.CloudProvider; v != nil { + mustRestart, err := snaputil.UpdateServiceArguments(c.snap, "kube-controller-manager", map[string]string{"--cloud-provider": *v}, nil) + if err != nil { + return fmt.Errorf("failed to update kube-controller-manager arguments: %w", err) + } + + if mustRestart { + if err := c.snap.RestartService(ctx, "kube-controller-manager"); err != nil { + return fmt.Errorf("failed to restart kube-controller-manager to apply configuration: %w", err) + } + } + } + + return nil +} diff --git a/src/k8s/pkg/k8sd/controllers/control_plane_configuration_test.go b/src/k8s/pkg/k8sd/controllers/control_plane_configuration_test.go new file mode 100644 index 000000000..83f1c1169 --- /dev/null +++ b/src/k8s/pkg/k8sd/controllers/control_plane_configuration_test.go @@ -0,0 +1,319 @@ +package controllers_test + +import ( + "context" + "os" + "path" + "testing" + "time" + + "github.com/canonical/k8s/pkg/k8sd/controllers" + "github.com/canonical/k8s/pkg/k8sd/setup" + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/snap/mock" + snaputil "github.com/canonical/k8s/pkg/snap/util" + "github.com/canonical/k8s/pkg/utils/vals" + .
"github.com/onsi/gomega" +) + +// channelSendTimeout is the timeout for pushing to channels for TestControlPlaneConfigController +const channelSendTimeout = 100 * time.Millisecond + +type configProvider struct { + config types.ClusterConfig +} + +func (c *configProvider) getConfig(ctx context.Context) (types.ClusterConfig, error) { + return c.config, nil +} + +func TestControlPlaneConfigController(t *testing.T) { + t.Run("ControlPlane", func(t *testing.T) { + dir := t.TempDir() + + s := &mock.Snap{ + Mock: mock.Mock{ + EtcdPKIDir: path.Join(dir, "etcd-pki"), + ServiceArgumentsDir: path.Join(dir, "args"), + UID: os.Getuid(), + GID: os.Getgid(), + }, + } + + g := NewWithT(t) + g.Expect(setup.EnsureAllDirectories(s)).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + triggerCh := make(chan time.Time) + configProvider := &configProvider{} + + ctrl := controllers.NewControlPlaneConfigurationController(s, func() {}, triggerCh) + go ctrl.Run(ctx, configProvider.getConfig) + + for _, tc := range []struct { + name string + config types.ClusterConfig + + expectKubeAPIServerArgs map[string]string + expectKubeControllerManagerArgs map[string]string + + expectServiceRestarts []string + expectFilesToExist map[string]bool + }{ + { + name: "Default", + config: types.ClusterConfig{ + Datastore: types.Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer("http://127.0.0.1:2379"), + }, + }, + expectKubeAPIServerArgs: map[string]string{ + "--etcd-servers": "http://127.0.0.1:2379", + }, + expectFilesToExist: map[string]bool{ + path.Join(dir, "etcd-pki", "ca.crt"): false, + path.Join(dir, "etcd-pki", "client.crt"): false, + path.Join(dir, "etcd-pki", "client.key"): false, + }, + expectServiceRestarts: []string{"kube-apiserver"}, + }, + { + name: "Certs", + config: types.ClusterConfig{ + Datastore: types.Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer("https://127.0.0.1:2379"), + ExternalCACert: vals.Pointer("CA DATA"), + ExternalClientCert: vals.Pointer("CERT DATA"), + ExternalClientKey: vals.Pointer("KEY DATA"), + }, + }, + expectKubeAPIServerArgs: map[string]string{ + "--etcd-servers": "https://127.0.0.1:2379", + "--etcd-cafile": path.Join(dir, "etcd-pki", "ca.crt"), + "--etcd-certfile": path.Join(dir, "etcd-pki", "client.crt"), + "--etcd-keyfile": path.Join(dir, "etcd-pki", "client.key"), + }, + expectFilesToExist: map[string]bool{ + path.Join(dir, "etcd-pki", "ca.crt"): true, + path.Join(dir, "etcd-pki", "client.crt"): true, + path.Join(dir, "etcd-pki", "client.key"): true, + }, + expectServiceRestarts: []string{"kube-apiserver"}, + }, + { + name: "CloudProvider", + config: types.ClusterConfig{ + Kubelet: types.Kubelet{ + CloudProvider: vals.Pointer("external"), + }, + }, + expectKubeControllerManagerArgs: map[string]string{ + "--cloud-provider": "external", + }, + expectServiceRestarts: []string{"kube-controller-manager"}, + }, + { + name: "NoUpdates", + config: types.ClusterConfig{ + Datastore: types.Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer("https://127.0.0.1:2379"), + ExternalCACert: vals.Pointer("CA DATA"), + ExternalClientCert: vals.Pointer("CERT DATA"), + ExternalClientKey: vals.Pointer("KEY DATA"), + }, + Kubelet: types.Kubelet{ + CloudProvider: vals.Pointer("external"), + }, + }, + expectKubeAPIServerArgs: map[string]string{ + "--etcd-servers": "https://127.0.0.1:2379", + "--etcd-cafile": path.Join(dir, "etcd-pki", "ca.crt"), + "--etcd-certfile": path.Join(dir, "etcd-pki", 
"client.crt"), + "--etcd-keyfile": path.Join(dir, "etcd-pki", "client.key"), + }, + expectFilesToExist: map[string]bool{ + path.Join(dir, "etcd-pki", "ca.crt"): true, + path.Join(dir, "etcd-pki", "client.crt"): true, + path.Join(dir, "etcd-pki", "client.key"): true, + }, + expectKubeControllerManagerArgs: map[string]string{ + "--cloud-provider": "external", + }, + }, + { + name: "UpdateAll", + config: types.ClusterConfig{ + Datastore: types.Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer("http://127.0.0.1:2379"), + }, + Kubelet: types.Kubelet{ + CloudProvider: vals.Pointer(""), + }, + }, + expectKubeAPIServerArgs: map[string]string{ + "--etcd-servers": "http://127.0.0.1:2379", + "--etcd-cafile": "", + "--etcd-certfile": "", + "--etcd-keyfile": "", + }, + expectFilesToExist: map[string]bool{ + path.Join(dir, "etcd-pki", "ca.crt"): false, + path.Join(dir, "etcd-pki", "client.crt"): false, + path.Join(dir, "etcd-pki", "client.key"): false, + }, + expectKubeControllerManagerArgs: map[string]string{ + "--cloud-provider": "", + }, + expectServiceRestarts: []string{"kube-apiserver", "kube-controller-manager"}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + s.RestartServiceCalledWith = nil + + configProvider.config = tc.config + + select { + case triggerCh <- time.Now(): + case <-time.After(channelSendTimeout): + g.Fail("Timed out while attempting to trigger controller reconcile loop") + } + + // TODO: this should be changed to call g.Eventually() + <-time.After(50 * time.Millisecond) + + g.Expect(s.RestartServiceCalledWith).To(ConsistOf(tc.expectServiceRestarts)) + + t.Run("APIServerArgs", func(t *testing.T) { + for earg, eval := range tc.expectKubeAPIServerArgs { + t.Run(earg, func(t *testing.T) { + g := NewWithT(t) + + val, err := snaputil.GetServiceArgument(s, "kube-apiserver", earg) + g.Expect(err).To(BeNil()) + g.Expect(val).To(Equal(eval)) + }) + } + }) + + t.Run("KubeControllerManagerArgs", func(t *testing.T) { + for earg, eval := range tc.expectKubeControllerManagerArgs { + t.Run(earg, func(t *testing.T) { + g := NewWithT(t) + + val, err := snaputil.GetServiceArgument(s, "kube-controller-manager", earg) + g.Expect(err).To(BeNil()) + g.Expect(val).To(Equal(eval)) + }) + } + }) + + t.Run("Certs", func(t *testing.T) { + for file, mustExist := range tc.expectFilesToExist { + t.Run(path.Base(file), func(t *testing.T) { + g := NewWithT(t) + + _, err := os.Stat(file) + if mustExist { + g.Expect(err).To(BeNil()) + } else { + g.Expect(err).To(MatchError(os.ErrNotExist)) + } + }) + } + }) + }) + } + }) + + t.Run("Worker", func(t *testing.T) { + dir := t.TempDir() + + s := &mock.Snap{ + Mock: mock.Mock{ + EtcdPKIDir: path.Join(dir, "etcd-pki"), + ServiceArgumentsDir: path.Join(dir, "args"), + LockFilesDir: path.Join(dir, "locks"), + UID: os.Getuid(), + GID: os.Getgid(), + }, + } + + g := NewWithT(t) + g.Expect(setup.EnsureAllDirectories(s)).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + triggerCh := make(chan time.Time) + configProvider := &configProvider{} + + ctrl := controllers.NewControlPlaneConfigurationController(s, func() {}, triggerCh) + go ctrl.Run(ctx, configProvider.getConfig) + + // mark as worker node + g.Expect(snaputil.MarkAsWorkerNode(s, true)).To(Succeed()) + + configProvider.config = types.ClusterConfig{ + Datastore: types.Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer("https://127.0.0.1:2379"), + ExternalCACert: vals.Pointer("CA DATA"), + ExternalClientCert: 
vals.Pointer("CERT DATA"), + ExternalClientKey: vals.Pointer("KEY DATA"), + }, + Kubelet: types.Kubelet{ + CloudProvider: vals.Pointer("external"), + }, + } + + select { + case triggerCh <- time.Now(): + case <-time.After(channelSendTimeout): + g.Fail("Timed out while attempting to trigger controller reconcile loop") + } + + // TODO: this should be changed to call g.Eventually() + <-time.After(50 * time.Millisecond) + + g.Expect(s.RestartServiceCalledWith).To(BeEmpty()) + + t.Run("APIServerArgs", func(t *testing.T) { + for _, arg := range []string{"--etcd-servers", "--etcd-cafile", "--etcd-certfile", "--etcd-keyfile"} { + t.Run(arg, func(t *testing.T) { + g := NewWithT(t) + + val, err := snaputil.GetServiceArgument(s, "kube-apiserver", "--etcd-servers") + g.Expect(err).To(HaveOccurred()) + g.Expect(val).To(BeEmpty()) + }) + } + }) + + t.Run("KubeControllerManagerArgs", func(t *testing.T) { + g := NewWithT(t) + + val, err := snaputil.GetServiceArgument(s, "kube-controller-manager", "--cloud-provider") + g.Expect(err).To(HaveOccurred()) + g.Expect(val).To(BeEmpty()) + }) + + t.Run("Certs", func(t *testing.T) { + for _, cert := range []string{"ca.crt", "client.crt", "client.key"} { + t.Run(cert, func(t *testing.T) { + g := NewWithT(t) + + _, err := os.Stat(path.Join(dir, "etcd-pki", cert)) + g.Expect(err).To(MatchError(os.ErrNotExist)) + }) + } + }) + }) +} diff --git a/src/k8s/pkg/k8sd/controllers/node_configuration.go b/src/k8s/pkg/k8sd/controllers/node_configuration.go index 18ea31ad9..bf2d718f1 100644 --- a/src/k8s/pkg/k8sd/controllers/node_configuration.go +++ b/src/k8s/pkg/k8sd/controllers/node_configuration.go @@ -14,59 +14,88 @@ import ( ) type NodeConfigurationController struct { - snap snap.Snap - createK8sClient func(ctx context.Context) *k8s.Client + snap snap.Snap + waitReady func() + newK8sClient func() (*k8s.Client, error) } -func NewNodeConfigurationController(snap snap.Snap, createK8sClient func(ctx context.Context) *k8s.Client) *NodeConfigurationController { +func NewNodeConfigurationController(snap snap.Snap, waitReady func(), newK8sClient func() (*k8s.Client, error)) *NodeConfigurationController { return &NodeConfigurationController{ - snap: snap, - createK8sClient: createK8sClient, + snap: snap, + waitReady: waitReady, + newK8sClient: newK8sClient, } } -func (c *NodeConfigurationController) Run(ctx context.Context) { - client := c.createK8sClient(ctx) +func (c *NodeConfigurationController) retryNewK8sClient(ctx context.Context) (*k8s.Client, error) { for { + client, err := c.newK8sClient() + if err == nil { + return client, nil + } + select { case <-ctx.Done(): - return + return nil, ctx.Err() case <-time.After(3 * time.Second): } + } +} +func (c *NodeConfigurationController) Run(ctx context.Context) { + // wait for microcluster node to be ready + c.waitReady() + + client, err := c.retryNewK8sClient(ctx) + if err != nil { + log.Println(fmt.Errorf("failed to create a Kubernetes client: %w", err)) + } + + for { if err := client.WatchConfigMap(ctx, "kube-system", "k8sd-config", func(configMap *v1.ConfigMap) error { return c.reconcile(ctx, configMap) }); err != nil { // This also can fail during bootstrapping/start up when api-server is not ready // So the watch requests get connection refused replies - log.Println(fmt.Errorf("error while watching configmap: %w", err)) + log.Println(fmt.Errorf("failed to watch configmap: %w", err)) + } + + select { + case <-ctx.Done(): + return + case <-time.After(3 * time.Second): } } } func (c *NodeConfigurationController) reconcile(ctx 
context.Context, configMap *v1.ConfigMap) error { - nodeConfig := types.NodeConfigFromMap(configMap.Data) - - kubeletUpdateMap := make(map[string]string) - var kubeletDeleteList []string - - if nodeConfig.ClusterDNS != nil && *nodeConfig.ClusterDNS != "" { - kubeletUpdateMap["--cluster-dns"] = *nodeConfig.ClusterDNS - } else { - kubeletDeleteList = append(kubeletDeleteList, "--cluster-dns") + config, err := types.KubeletFromConfigMap(configMap.Data) + if err != nil { + return fmt.Errorf("failed to parse configmap data to kubelet config: %w", err) } - if nodeConfig.ClusterDomain != nil && *nodeConfig.ClusterDomain != "" { - kubeletUpdateMap["--cluster-domain"] = *nodeConfig.ClusterDomain - } else { - kubeletUpdateMap["--cluster-domain"] = "cluster.local" - } + updateArgs := make(map[string]string) + var deleteArgs []string - if nodeConfig.CloudProvider != nil && *nodeConfig.CloudProvider != "" { - kubeletUpdateMap["--cloud-provider"] = *nodeConfig.CloudProvider - } else { - kubeletDeleteList = append(kubeletDeleteList, "--cloud-provider") + for _, loop := range []struct { + val *string + arg string + }{ + {arg: "--cloud-provider", val: config.CloudProvider}, + {arg: "--cluster-dns", val: config.ClusterDNS}, + {arg: "--cluster-domain", val: config.ClusterDomain}, + } { + switch { + case loop.val == nil: + // value is not set in the configmap, no-op + case *loop.val == "": + // value is set in the configmap to the empty string, delete argument + deleteArgs = append(deleteArgs, loop.arg) + case *loop.val != "": + // value is set in the configmap, update argument + updateArgs[loop.arg] = *loop.val + } } - mustRestartKubelet, err := snaputil.UpdateServiceArguments(c.snap, "kubelet", kubeletUpdateMap, kubeletDeleteList) + mustRestartKubelet, err := snaputil.UpdateServiceArguments(c.snap, "kubelet", updateArgs, deleteArgs) if err != nil { return fmt.Errorf("failed to update kubelet arguments: %w", err) } diff --git a/src/k8s/pkg/k8sd/controllers/node_configuration_test.go b/src/k8s/pkg/k8sd/controllers/node_configuration_test.go index e1b41075a..e99806690 100644 --- a/src/k8s/pkg/k8sd/controllers/node_configuration_test.go +++ b/src/k8s/pkg/k8sd/controllers/node_configuration_test.go @@ -2,7 +2,6 @@ package controllers import ( "context" - "net" "os" "path" "testing" @@ -21,20 +20,14 @@ import ( ) func TestConfigPropagation(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() g := NewWithT(t) - dir := t.TempDir() - s := &mock.Snap{ Mock: mock.Mock{ - KubernetesPKIDir: path.Join(dir, "pki"), - KubernetesConfigDir: path.Join(dir, "k8s-config"), - KubeletRootDir: path.Join(dir, "kubelet-root"), - ServiceArgumentsDir: path.Join(dir, "args"), - ContainerdSocketDir: path.Join(dir, "containerd-run"), - OnLXD: false, + ServiceArgumentsDir: path.Join(t.TempDir(), "args"), UID: os.Getuid(), GID: os.Getgid(), }, @@ -42,16 +35,31 @@ func TestConfigPropagation(t *testing.T) { g.Expect(setup.EnsureAllDirectories(s)).To(Succeed()) - // Call the kubelet control plane setup function - g.Expect(setup.KubeletControlPlane(s, "dev", net.ParseIP("192.168.0.1"), "10.152.1.1", "test-cluster.local", "provider")).To(Succeed()) - tests := []struct { - name string - configmap *corev1.ConfigMap - expectedUpdates map[string]string + name string + configmap *corev1.ConfigMap + expectArgs map[string]string + expectRestart bool }{ { - name: "ignore non-existent keys", + name: "Initial", + configmap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: 
"k8sd-config", Namespace: "kube-system"}, + Data: map[string]string{ + "cluster-dns": "10.152.1.1", + "cluster-domain": "test-cluster.local", + "cloud-provider": "provider", + }, + }, + expectArgs: map[string]string{ + "--cluster-dns": "10.152.1.1", + "--cluster-domain": "test-cluster.local", + "--cloud-provider": "provider", + }, + expectRestart: true, + }, + { + name: "IgnoreUnknownFields", configmap: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "k8sd-config", Namespace: "kube-system"}, Data: map[string]string{ @@ -60,19 +68,29 @@ func TestConfigPropagation(t *testing.T) { "non-existent-key3": "value3", }, }, + expectArgs: map[string]string{ + "--cluster-dns": "10.152.1.1", + "--cluster-domain": "test-cluster.local", + "--cloud-provider": "provider", + }, }, { - name: "remove cluster-dns on missing key", + name: "RemoveClusterDNS", configmap: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "k8sd-config", Namespace: "kube-system"}, - Data: map[string]string{}, + Data: map[string]string{ + "cluster-dns": "", + }, }, - expectedUpdates: map[string]string{ - "--cluster-dns": "", + expectArgs: map[string]string{ + "--cluster-dns": "", + "--cluster-domain": "test-cluster.local", + "--cloud-provider": "provider", }, + expectRestart: true, }, { - name: "update node configuration", + name: "UpdateDNS", configmap: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "k8sd-config", Namespace: "kube-system"}, Data: map[string]string{ @@ -80,22 +98,25 @@ func TestConfigPropagation(t *testing.T) { "cluster-dns": "10.152.1.3", }, }, - expectedUpdates: map[string]string{ + expectArgs: map[string]string{ "--cluster-domain": "test-cluster2.local", "--cluster-dns": "10.152.1.3", + "--cloud-provider": "provider", }, + expectRestart: true, }, { - name: "cluster-domain remains on missing key", + name: "PreserveClusterDomain", configmap: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "k8sd-config", Namespace: "kube-system"}, Data: map[string]string{ "cluster-dns": "10.152.1.3", }, }, - expectedUpdates: map[string]string{ - "--cluster-domain": "cluster.local", + expectArgs: map[string]string{ + "--cluster-domain": "test-cluster2.local", "--cluster-dns": "10.152.1.3", + "--cloud-provider": "provider", }, }, } @@ -104,26 +125,36 @@ func TestConfigPropagation(t *testing.T) { watcher := watch.NewFake() clientset.PrependWatchReactor("configmaps", k8stesting.DefaultWatchReactor(watcher, nil)) - configController := NewNodeConfigurationController(s, func(ctx context.Context) *k8s.Client { - return &k8s.Client{Interface: clientset} + configController := NewNodeConfigurationController(s, func() {}, func() (*k8s.Client, error) { + return &k8s.Client{Interface: clientset}, nil }) go configController.Run(ctx) - defer watcher.Stop() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { + s.RestartServiceCalledWith = nil + g := NewWithT(t) watcher.Add(tc.configmap) + + // TODO: this is to ensure that the controller has handled the event. 
This should ideally + // be replaced with something like a "<-sentCh" instead time.Sleep(100 * time.Millisecond) - for ekey, evalue := range tc.expectedUpdates { + for ekey, evalue := range tc.expectArgs { val, err := snaputil.GetServiceArgument(s, "kubelet", ekey) - g.Expect(err).ToNot(HaveOccurred()) + g.Expect(err).To(BeNil()) g.Expect(val).To(Equal(evalue)) } + + if tc.expectRestart { + g.Expect(s.RestartServiceCalledWith).To(Equal([]string{"kubelet"})) + } else { + g.Expect(s.RestartServiceCalledWith).To(BeEmpty()) + } }) } } diff --git a/src/k8s/pkg/k8sd/database/cluster_config.go b/src/k8s/pkg/k8sd/database/cluster_config.go index 50ff916f7..8303d670a 100644 --- a/src/k8s/pkg/k8sd/database/cluster_config.go +++ b/src/k8s/pkg/k8sd/database/cluster_config.go @@ -3,49 +3,50 @@ package database import ( "context" "database/sql" + "encoding/json" "fmt" "github.com/canonical/k8s/pkg/k8sd/types" "github.com/canonical/microcluster/cluster" - "gopkg.in/yaml.v2" ) var ( clusterConfigsStmts = map[string]int{ - "insert-v1alpha1": MustPrepareStatement("cluster-configs", "insert-v1alpha1.sql"), - "select-v1alpha1": MustPrepareStatement("cluster-configs", "select-v1alpha1.sql"), + "insert-v1alpha2": MustPrepareStatement("cluster-configs", "insert-v1alpha2.sql"), + "select-v1alpha2": MustPrepareStatement("cluster-configs", "select-v1alpha2.sql"), } ) // SetClusterConfig updates the cluster configuration with any non-empty values that are set. // SetClusterConfig will attempt to merge the existing and new configs, and return an error if any protected fields have changed. -func SetClusterConfig(ctx context.Context, tx *sql.Tx, new types.ClusterConfig) error { +// SetClusterConfig will return the merged cluster configuration on success. +func SetClusterConfig(ctx context.Context, tx *sql.Tx, new types.ClusterConfig) (types.ClusterConfig, error) { old, err := GetClusterConfig(ctx, tx) if err != nil { - return fmt.Errorf("failed to fetch existing cluster config: %w", err) + return types.ClusterConfig{}, fmt.Errorf("failed to fetch existing cluster config: %w", err) } config, err := types.MergeClusterConfig(old, new) if err != nil { - return fmt.Errorf("failed to update cluster config: %w", err) + return types.ClusterConfig{}, fmt.Errorf("failed to update cluster config: %w", err) } - b, err := yaml.Marshal(config) + b, err := json.Marshal(config) if err != nil { - return fmt.Errorf("failed to encode cluster config: %w", err) + return types.ClusterConfig{}, fmt.Errorf("failed to encode cluster config: %w", err) } - insertTxStmt, err := cluster.Stmt(tx, clusterConfigsStmts["insert-v1alpha1"]) + insertTxStmt, err := cluster.Stmt(tx, clusterConfigsStmts["insert-v1alpha2"]) if err != nil { - return fmt.Errorf("failed to prepare insert statement: %w", err) + return types.ClusterConfig{}, fmt.Errorf("failed to prepare insert statement: %w", err) } if _, err := insertTxStmt.ExecContext(ctx, string(b)); err != nil { - return fmt.Errorf("failed to insert v1alpha1 config: %w", err) + return types.ClusterConfig{}, fmt.Errorf("failed to insert v1alpha2 config: %w", err) } - return nil + return config, nil } // GetClusterConfig retrieves the cluster configuration from the database. 
func GetClusterConfig(ctx context.Context, tx *sql.Tx) (types.ClusterConfig, error) { - txStmt, err := cluster.Stmt(tx, clusterConfigsStmts["select-v1alpha1"]) + txStmt, err := cluster.Stmt(tx, clusterConfigsStmts["select-v1alpha2"]) if err != nil { return types.ClusterConfig{}, fmt.Errorf("failed to prepare statement: %w", err) } @@ -55,12 +56,12 @@ func GetClusterConfig(ctx context.Context, tx *sql.Tx) (types.ClusterConfig, err if err == sql.ErrNoRows { return types.ClusterConfig{}, nil } - return types.ClusterConfig{}, fmt.Errorf("failed to retrieve v1alpha1 config: %w", err) + return types.ClusterConfig{}, fmt.Errorf("failed to retrieve v1alpha2 config: %w", err) } var clusterConfig types.ClusterConfig - if err := yaml.Unmarshal([]byte(s), &clusterConfig); err != nil { - return types.ClusterConfig{}, fmt.Errorf("failed to parse v1alpha1 config: %w", err) + if err := json.Unmarshal([]byte(s), &clusterConfig); err != nil { + return types.ClusterConfig{}, fmt.Errorf("failed to parse v1alpha2 config: %w", err) } return clusterConfig, nil diff --git a/src/k8s/pkg/k8sd/database/cluster_config_test.go b/src/k8s/pkg/k8sd/database/cluster_config_test.go index 10dabbdf0..608c33041 100644 --- a/src/k8s/pkg/k8sd/database/cluster_config_test.go +++ b/src/k8s/pkg/k8sd/database/cluster_config_test.go @@ -7,6 +7,7 @@ import ( "github.com/canonical/k8s/pkg/k8sd/database" "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/utils/vals" . "github.com/onsi/gomega" ) @@ -16,14 +17,15 @@ func TestClusterConfig(t *testing.T) { g := NewWithT(t) expectedClusterConfig := types.ClusterConfig{ Certificates: types.Certificates{ - CACert: "CA CERT DATA", - CAKey: "CA KEY DATA", + CACert: vals.Pointer("CA CERT DATA"), + CAKey: vals.Pointer("CA KEY DATA"), }, } + expectedClusterConfig.SetDefaults() // Write some config to the database err := d.Transaction(ctx, func(ctx context.Context, tx *sql.Tx) error { - err := database.SetClusterConfig(context.Background(), tx, expectedClusterConfig) + _, err := database.SetClusterConfig(context.Background(), tx, expectedClusterConfig) g.Expect(err).To(BeNil()) return nil }) @@ -44,15 +46,16 @@ func TestClusterConfig(t *testing.T) { g := NewWithT(t) expectedClusterConfig := types.ClusterConfig{ Certificates: types.Certificates{ - CACert: "CA CERT DATA", - CAKey: "CA KEY DATA", + CACert: vals.Pointer("CA CERT DATA"), + CAKey: vals.Pointer("CA KEY DATA"), }, } + expectedClusterConfig.SetDefaults() err := d.Transaction(ctx, func(ctx context.Context, tx *sql.Tx) error { - err := database.SetClusterConfig(context.Background(), tx, types.ClusterConfig{ + _, err := database.SetClusterConfig(context.Background(), tx, types.ClusterConfig{ Certificates: types.Certificates{ - CACert: "CA CERT NEW DATA", + CACert: vals.Pointer("CA CERT NEW DATA"), }, }) g.Expect(err).To(HaveOccurred()) @@ -73,32 +76,34 @@ func TestClusterConfig(t *testing.T) { g := NewWithT(t) expectedClusterConfig := types.ClusterConfig{ Certificates: types.Certificates{ - CACert: "CA CERT DATA", - CAKey: "CA KEY DATA", - K8sDqliteCert: "CERT DATA", - K8sDqliteKey: "KEY DATA", + CACert: vals.Pointer("CA CERT DATA"), + CAKey: vals.Pointer("CA KEY DATA"), + ServiceAccountKey: vals.Pointer("SA KEY DATA"), }, - Kubelet: types.Kubelet{ - ClusterDNS: "10.152.183.10", + Datastore: types.Datastore{ + K8sDqliteCert: vals.Pointer("CERT DATA"), + K8sDqliteKey: vals.Pointer("KEY DATA"), }, - APIServer: types.APIServer{ - ServiceAccountKey: "SA KEY DATA", + Kubelet: types.Kubelet{ + ClusterDNS: 
vals.Pointer("10.152.183.10"), }, } + expectedClusterConfig.SetDefaults() err := d.Transaction(ctx, func(ctx context.Context, tx *sql.Tx) error { - err := database.SetClusterConfig(context.Background(), tx, types.ClusterConfig{ + returnedConfig, err := database.SetClusterConfig(context.Background(), tx, types.ClusterConfig{ Kubelet: types.Kubelet{ - ClusterDNS: "10.152.183.10", + ClusterDNS: vals.Pointer("10.152.183.10"), }, - Certificates: types.Certificates{ - K8sDqliteCert: "CERT DATA", - K8sDqliteKey: "KEY DATA", + Datastore: types.Datastore{ + K8sDqliteCert: vals.Pointer("CERT DATA"), + K8sDqliteKey: vals.Pointer("KEY DATA"), }, - APIServer: types.APIServer{ - ServiceAccountKey: "SA KEY DATA", + Certificates: types.Certificates{ + ServiceAccountKey: vals.Pointer("SA KEY DATA"), }, }) + g.Expect(returnedConfig).To(Equal(expectedClusterConfig)) g.Expect(err).To(BeNil()) return nil }) diff --git a/src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/insert-v1alpha1.sql b/src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/insert-v1alpha2.sql similarity index 84% rename from src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/insert-v1alpha1.sql rename to src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/insert-v1alpha2.sql index 3bb8895a6..9adad06b7 100644 --- a/src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/insert-v1alpha1.sql +++ b/src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/insert-v1alpha2.sql @@ -1,6 +1,6 @@ INSERT INTO cluster_configs(key, value) VALUES - ("v1alpha1", ?) + ("v1alpha2", ?) ON CONFLICT(key) DO UPDATE SET value = EXCLUDED.value; diff --git a/src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/select-v1alpha1.sql b/src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/select-v1alpha2.sql similarity index 70% rename from src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/select-v1alpha1.sql rename to src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/select-v1alpha2.sql index f95aba16d..11b40db03 100644 --- a/src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/select-v1alpha1.sql +++ b/src/k8s/pkg/k8sd/database/sql/queries/cluster-configs/select-v1alpha2.sql @@ -3,4 +3,4 @@ SELECT FROM cluster_configs AS c WHERE - c.key = "v1alpha1" + c.key = "v1alpha2" diff --git a/src/k8s/pkg/k8sd/database/util_test.go b/src/k8s/pkg/k8sd/database/util_test.go index e9e23985d..a9326a02b 100644 --- a/src/k8s/pkg/k8sd/database/util_test.go +++ b/src/k8s/pkg/k8sd/database/util_test.go @@ -15,6 +15,8 @@ import ( const ( // microclusterDatabaseInitTimeout is the timeout for microcluster database initialization operations microclusterDatabaseInitTimeout = 3 * time.Second + // microclusterDatabaseShutdownTimeout is the timeout for microcluster database shutdown operations + microclusterDatabaseShutdownTimeout = 3 * time.Second ) var ( @@ -66,7 +68,7 @@ func WithDB(t *testing.T, f func(context.Context, DB)) { // app.Run() is blocking, so we get the database handle through a channel go func() { doneCh <- app.Run(&config.Hooks{ - OnBootstrap: func(s *state.State, initConfig map[string]string) error { + PostBootstrap: func(s *state.State, initConfig map[string]string) error { databaseCh <- s.Database return nil }, @@ -76,12 +78,12 @@ func WithDB(t *testing.T, f func(context.Context, DB)) { }) }() - if err := app.MicroCluster.Ready(int(microclusterDatabaseInitTimeout / time.Second)); err != nil { + if err := app.MicroCluster().Ready(int(microclusterDatabaseInitTimeout / time.Second)); err != nil { t.Fatalf("microcluster app was not ready in time: %v", err) } 
nextIdx++ - if err := app.MicroCluster.NewCluster(fmt.Sprintf("test-%d", nextIdx), fmt.Sprintf("127.0.0.1:%d", 51030+nextIdx), nil, microclusterDatabaseInitTimeout); err != nil { + if err := app.MicroCluster().NewCluster(fmt.Sprintf("test-%d", nextIdx), fmt.Sprintf("127.0.0.1:%d", 51030+nextIdx), nil, microclusterDatabaseInitTimeout); err != nil { t.Fatalf("microcluster app failed to bootstrap: %v", err) } @@ -100,6 +102,11 @@ func WithDB(t *testing.T, f func(context.Context, DB)) { f(ctx, db) } - // cancel context. don't bother waiting for the microcluster instance to stop, as it will not + // cancel context to stop the microcluster instance, and wait for it to shutdown cancel() + select { + case <-doneCh: + case <-time.After(microclusterDatabaseShutdownTimeout): + t.Fatalf("timed out waiting for microcluster to shutdown") + } } diff --git a/src/k8s/pkg/k8sd/pki/k8sdqlite_test.go b/src/k8s/pkg/k8sd/pki/k8sdqlite_test.go index 837e38e80..9596b2c4c 100644 --- a/src/k8s/pkg/k8sd/pki/k8sdqlite_test.go +++ b/src/k8s/pkg/k8sd/pki/k8sdqlite_test.go @@ -120,7 +120,7 @@ m5cIDhPBuZSCs7ZnhWCHF0WMztl6fqNVp2GuFGbDM+LjAZT2YOdP0Ts= for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - g := NewGomegaWithT(t) + g := NewWithT(t) err := tt.pki.CompleteCertificates() if tt.expectedError { @@ -170,7 +170,7 @@ func TestNewK8sDqlitePKI(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - g := NewGomegaWithT(t) + g := NewWithT(t) pki := NewK8sDqlitePKI(tt.opts) g.Expect(pki).To(Equal(tt.expectedPki), "Unexpected K8sDqlitePKI") diff --git a/src/k8s/pkg/k8sd/pki/load.go b/src/k8s/pkg/k8sd/pki/load.go index 243406f3a..06c3878b5 100644 --- a/src/k8s/pkg/k8sd/pki/load.go +++ b/src/k8s/pkg/k8sd/pki/load.go @@ -23,9 +23,24 @@ func loadCertificate(certPEM string, keyPEM string) (*x509.Certificate, *rsa.Pri var key *rsa.PrivateKey if keyPEM != "" { pb, _ := pem.Decode([]byte(keyPEM)) - key, err = x509.ParsePKCS1PrivateKey(pb.Bytes) - if err != nil { - return nil, nil, fmt.Errorf("failed to parse private key: %w", err) + switch pb.Type { + case "RSA PRIVATE KEY": + key, err = x509.ParsePKCS1PrivateKey(pb.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse RSA private key: %w", err) + } + case "PRIVATE KEY": + parsed, err := x509.ParsePKCS8PrivateKey(pb.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse private key: %w", err) + } + v, ok := parsed.(*rsa.PrivateKey) + if !ok { + return nil, nil, fmt.Errorf("not an RSA private key") + } + key = v + default: + return nil, nil, fmt.Errorf("unknown private key block type %q", pb.Type) } } return cert, key, nil diff --git a/src/k8s/pkg/k8sd/setup/certificates.go b/src/k8s/pkg/k8sd/setup/certificates.go index cbd5a315a..bc418afe1 100644 --- a/src/k8s/pkg/k8sd/setup/certificates.go +++ b/src/k8s/pkg/k8sd/setup/certificates.go @@ -2,6 +2,7 @@ package setup import ( "fmt" + "io/fs" "os" "path" @@ -9,113 +10,76 @@ import ( "github.com/canonical/k8s/pkg/snap" ) -func EnsureExtDatastorePKI(snap snap.Snap, certificates *pki.ExternalDatastorePKI) error { - toWrite := map[string]string{ - path.Join(snap.EtcdPKIDir(), "ca.crt"): certificates.DatastoreCACert, - path.Join(snap.EtcdPKIDir(), "client.key"): certificates.DatastoreClientKey, - path.Join(snap.EtcdPKIDir(), "client.crt"): certificates.DatastoreClientCert, +// ensureFile creates fname with the specified contents, mode and owner bits. +// ensureFile will delete the file if contents is an empty string. 
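The `loadCertificate` change above means PEM blocks of type `PRIVATE KEY` (PKCS#8) are now accepted alongside the traditional `RSA PRIVATE KEY` (PKCS#1) blocks, as long as the wrapped key is RSA. A standalone sketch, using only the standard library, of producing both encodings for anyone writing fixtures against this code path:

```go
// Standalone sketch: generate the two RSA key encodings that the updated
// loadCertificate accepts ("RSA PRIVATE KEY" / PKCS#1 and "PRIVATE KEY" /
// PKCS#8). Useful when writing test fixtures for this code path.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// PKCS#1: the only format parsed before this change.
	pkcs1 := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	// PKCS#8: now also accepted, provided the inner key is RSA.
	der, err := x509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		panic(err)
	}
	pkcs8 := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})

	fmt.Printf("PKCS#1 block: %d bytes\nPKCS#8 block: %d bytes\n", len(pkcs1), len(pkcs8))
}
```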
+func ensureFile(fname string, contents string, uid, gid int, mode fs.FileMode) error { + if contents == "" { + if err := os.Remove(fname); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to delete: %w", err) + } + return nil } - for fname, cert := range toWrite { - // Do not create files if contents are empty/certificates are not set - if cert == "" { - continue - } - if err := os.WriteFile(fname, []byte(cert), 0600); err != nil { - return fmt.Errorf("failed to write %s: %w", path.Base(fname), err) - } - if err := os.Chown(fname, snap.UID(), snap.GID()); err != nil { - return fmt.Errorf("failed to chown %s: %w", fname, err) - } - if err := os.Chmod(fname, 0600); err != nil { - return fmt.Errorf("failed to chmod %s: %w", fname, err) - } + if err := os.WriteFile(fname, []byte(contents), mode); err != nil { + return fmt.Errorf("failed to write: %w", err) + } + if err := os.Chown(fname, uid, gid); err != nil { + return fmt.Errorf("failed to chown: %w", err) + } + if err := os.Chmod(fname, mode); err != nil { + return fmt.Errorf("failed to chmod: %w", err) } return nil } -func EnsureK8sDqlitePKI(snap snap.Snap, certificates *pki.K8sDqlitePKI) error { - toWrite := map[string]string{ - path.Join(snap.K8sDqliteStateDir(), "cluster.crt"): certificates.K8sDqliteCert, - path.Join(snap.K8sDqliteStateDir(), "cluster.key"): certificates.K8sDqliteKey, - } - - for fname, cert := range toWrite { - if err := os.WriteFile(fname, []byte(cert), 0600); err != nil { - return fmt.Errorf("failed to write %s: %w", path.Base(fname), err) - } - if err := os.Chown(fname, snap.UID(), snap.GID()); err != nil { - return fmt.Errorf("failed to chown %s: %w", fname, err) - } - if err := os.Chmod(fname, 0600); err != nil { - return fmt.Errorf("failed to chmod %s: %w", fname, err) +// ensureFiles calls ensureFile for many files +func ensureFiles(uid, gid int, mode fs.FileMode, files map[string]string) error { + for fname, cert := range files { + if err := ensureFile(fname, cert, uid, gid, mode); err != nil { + return fmt.Errorf("failed to configure %s: %w", path.Base(fname), err) } } - return nil } +func EnsureExtDatastorePKI(snap snap.Snap, certificates *pki.ExternalDatastorePKI) error { + return ensureFiles(snap.UID(), snap.GID(), 0600, map[string]string{ + path.Join(snap.EtcdPKIDir(), "ca.crt"): certificates.DatastoreCACert, + path.Join(snap.EtcdPKIDir(), "client.key"): certificates.DatastoreClientKey, + path.Join(snap.EtcdPKIDir(), "client.crt"): certificates.DatastoreClientCert, + }) +} + +func EnsureK8sDqlitePKI(snap snap.Snap, certificates *pki.K8sDqlitePKI) error { + return ensureFiles(snap.UID(), snap.GID(), 0600, map[string]string{ + path.Join(snap.K8sDqliteStateDir(), "cluster.crt"): certificates.K8sDqliteCert, + path.Join(snap.K8sDqliteStateDir(), "cluster.key"): certificates.K8sDqliteKey, + }) +} + func EnsureControlPlanePKI(snap snap.Snap, certificates *pki.ControlPlanePKI) error { - toWrite := map[string]string{ + return ensureFiles(snap.UID(), snap.GID(), 0600, map[string]string{ path.Join(snap.KubernetesPKIDir(), "apiserver-kubelet-client.crt"): certificates.APIServerKubeletClientCert, path.Join(snap.KubernetesPKIDir(), "apiserver-kubelet-client.key"): certificates.APIServerKubeletClientKey, path.Join(snap.KubernetesPKIDir(), "apiserver.crt"): certificates.APIServerCert, path.Join(snap.KubernetesPKIDir(), "apiserver.key"): certificates.APIServerKey, path.Join(snap.KubernetesPKIDir(), "ca.crt"): certificates.CACert, + path.Join(snap.KubernetesPKIDir(), "ca.key"): certificates.CAKey, 
path.Join(snap.KubernetesPKIDir(), "front-proxy-ca.crt"): certificates.FrontProxyCACert, + path.Join(snap.KubernetesPKIDir(), "front-proxy-ca.key"): certificates.FrontProxyCAKey, path.Join(snap.KubernetesPKIDir(), "front-proxy-client.crt"): certificates.FrontProxyClientCert, path.Join(snap.KubernetesPKIDir(), "front-proxy-client.key"): certificates.FrontProxyClientKey, path.Join(snap.KubernetesPKIDir(), "kubelet.crt"): certificates.KubeletCert, path.Join(snap.KubernetesPKIDir(), "kubelet.key"): certificates.KubeletKey, path.Join(snap.KubernetesPKIDir(), "serviceaccount.key"): certificates.ServiceAccountKey, - } - - if certificates.CAKey != "" { - toWrite[path.Join(snap.KubernetesPKIDir(), "ca.key")] = certificates.CAKey - } - if certificates.FrontProxyCAKey != "" { - toWrite[path.Join(snap.KubernetesPKIDir(), "front-proxy-ca.key")] = certificates.FrontProxyCACert - } - - for fname, cert := range toWrite { - if err := os.WriteFile(fname, []byte(cert), 0600); err != nil { - return fmt.Errorf("failed to write %s: %w", path.Base(fname), err) - } - if err := os.Chown(fname, snap.UID(), snap.GID()); err != nil { - return fmt.Errorf("failed to chown %s: %w", fname, err) - } - if err := os.Chmod(fname, 0600); err != nil { - return fmt.Errorf("failed to chmod %s: %w", fname, err) - } - } - - return nil + }) } func EnsureWorkerPKI(snap snap.Snap, certificates *pki.WorkerNodePKI) error { - toWrite := map[string]string{ - path.Join(snap.KubernetesPKIDir(), "ca.crt"): certificates.CACert, - } - - if certificates.KubeletCert != "" { - toWrite[path.Join(snap.KubernetesPKIDir(), "kubelet.crt")] = certificates.KubeletCert - } - if certificates.KubeletKey != "" { - toWrite[path.Join(snap.KubernetesPKIDir(), "kubelet.key")] = certificates.KubeletKey - } - - for fname, cert := range toWrite { - if err := os.WriteFile(fname, []byte(cert), 0600); err != nil { - return fmt.Errorf("failed to write %s: %w", path.Base(fname), err) - } - if err := os.Chown(fname, snap.UID(), snap.GID()); err != nil { - return fmt.Errorf("failed to chown %s: %w", fname, err) - } - if err := os.Chmod(fname, 0600); err != nil { - return fmt.Errorf("failed to chmod %s: %w", fname, err) - } - } - - return nil + return ensureFiles(snap.UID(), snap.GID(), 0600, map[string]string{ + path.Join(snap.KubernetesPKIDir(), "ca.crt"): certificates.CACert, + path.Join(snap.KubernetesPKIDir(), "kubelet.crt"): certificates.KubeletCert, + path.Join(snap.KubernetesPKIDir(), "kubelet.key"): certificates.KubeletKey, + }) } diff --git a/src/k8s/pkg/k8sd/setup/kube_apiserver.go b/src/k8s/pkg/k8sd/setup/kube_apiserver.go index 403106636..9d7fa4c21 100644 --- a/src/k8s/pkg/k8sd/setup/kube_apiserver.go +++ b/src/k8s/pkg/k8sd/setup/kube_apiserver.go @@ -6,6 +6,7 @@ import ( "path" "strings" + "github.com/canonical/k8s/pkg/k8sd/types" "github.com/canonical/k8s/pkg/snap" snaputil "github.com/canonical/k8s/pkg/snap/util" ) @@ -45,7 +46,7 @@ var ( ) // KubeAPIServer configures kube-apiserver on the local node. 
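The `Ensure*PKI` helpers above now route every certificate through `ensureFile`, which removes a file when its contents are empty instead of silently skipping it, so stale keys no longer linger on disk. A condensed, standalone illustration of that write-or-remove contract (not part of the patch):

```go
// Condensed illustration of the write-or-remove contract introduced by
// ensureFile above: empty contents remove a stale file instead of leaving
// it behind. Standalone sketch, not part of the patch.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func writeOrRemove(fname, contents string) error {
	if contents == "" {
		if err := os.Remove(fname); err != nil && !os.IsNotExist(err) {
			return err
		}
		return nil
	}
	return os.WriteFile(fname, []byte(contents), 0600)
}

func main() {
	dir, _ := os.MkdirTemp("", "pki")
	defer os.RemoveAll(dir)
	crt := filepath.Join(dir, "kubelet.crt")

	_ = writeOrRemove(crt, "CERT DATA") // file is created
	_ = writeOrRemove(crt, "")          // file is removed again
	_, err := os.Stat(crt)
	fmt.Println(os.IsNotExist(err)) // true
}
```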
-func KubeAPIServer(snap snap.Snap, serviceCIDR string, authWebhookURL string, enableFrontProxy bool, datastore string, externalDatastoreURL string, authorizationMode string) error { +func KubeAPIServer(snap snap.Snap, serviceCIDR string, authWebhookURL string, enableFrontProxy bool, datastore types.Datastore, authorizationMode string) error { authTokenWebhookConfigFile := path.Join(snap.ServiceExtraConfigDir(), "auth-token-webhook.conf") authTokenWebhookFile, err := os.OpenFile(authTokenWebhookConfigFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { @@ -78,20 +79,15 @@ func KubeAPIServer(snap snap.Snap, serviceCIDR string, authWebhookURL string, en "--tls-private-key-file": path.Join(snap.KubernetesPKIDir(), "apiserver.key"), } - switch datastore { - case "k8s-dqlite": - args["--etcd-servers"] = fmt.Sprintf("unix://%s", path.Join(snap.K8sDqliteStateDir(), "k8s-dqlite.sock")) - case "external": - args["--etcd-servers"] = externalDatastoreURL - if _, err := os.Stat(path.Join(snap.EtcdPKIDir(), "ca.crt")); err == nil { - args["--etcd-cafile"] = path.Join(snap.EtcdPKIDir(), "ca.crt") - } - if _, err := os.Stat(path.Join(snap.EtcdPKIDir(), "client.key")); err == nil { - args["--etcd-keyfile"] = path.Join(snap.EtcdPKIDir(), "client.key") - args["--etcd-certfile"] = path.Join(snap.EtcdPKIDir(), "client.crt") - } + switch datastore.GetType() { + case "k8s-dqlite", "external": default: - return fmt.Errorf("unsupported datastore %s, must be one of %v", datastore, SupportedDatastores) + return fmt.Errorf("unsupported datastore %s, must be one of %v", datastore.GetType(), SupportedDatastores) + } + + datastoreUpdateArgs, deleteArgs := datastore.ToKubeAPIServerArguments(snap) + for key, val := range datastoreUpdateArgs { + args[key] = val } if enableFrontProxy { @@ -103,7 +99,7 @@ func KubeAPIServer(snap snap.Snap, serviceCIDR string, authWebhookURL string, en args["--proxy-client-cert-file"] = path.Join(snap.KubernetesPKIDir(), "front-proxy-client.crt") args["--proxy-client-key-file"] = path.Join(snap.KubernetesPKIDir(), "front-proxy-client.key") } - if _, err := snaputil.UpdateServiceArguments(snap, "kube-apiserver", args, nil); err != nil { + if _, err := snaputil.UpdateServiceArguments(snap, "kube-apiserver", args, deleteArgs); err != nil { return fmt.Errorf("failed to render arguments file: %w", err) } return nil diff --git a/src/k8s/pkg/k8sd/setup/kube_apiserver_test.go b/src/k8s/pkg/k8sd/setup/kube_apiserver_test.go index b6ec3516e..17ed02a94 100644 --- a/src/k8s/pkg/k8sd/setup/kube_apiserver_test.go +++ b/src/k8s/pkg/k8sd/setup/kube_apiserver_test.go @@ -7,9 +7,11 @@ import ( "testing" "github.com/canonical/k8s/pkg/k8sd/setup" + "github.com/canonical/k8s/pkg/k8sd/types" "github.com/canonical/k8s/pkg/snap/mock" snaputil "github.com/canonical/k8s/pkg/snap/util" "github.com/canonical/k8s/pkg/utils" + "github.com/canonical/k8s/pkg/utils/vals" . 
"github.com/onsi/gomega" ) @@ -35,7 +37,7 @@ func TestKubeAPIServer(t *testing.T) { s := mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Call the KubeAPIServer setup function with mock arguments - g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", true, "k8s-dqlite", "datastoreurl", "Node,RBAC")).To(BeNil()) + g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", true, types.Datastore{Type: vals.Pointer("k8s-dqlite")}, "Node,RBAC")).To(BeNil()) // Ensure the kube-apiserver arguments file has the expected arguments and values tests := []struct { @@ -90,7 +92,7 @@ func TestKubeAPIServer(t *testing.T) { s := mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Call the KubeAPIServer setup function with mock arguments - g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", false, "k8s-dqlite", "datastoreurl", "Node,RBAC")).To(BeNil()) + g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", false, types.Datastore{Type: vals.Pointer("k8s-dqlite")}, "Node,RBAC")).To(BeNil()) // Ensure the kube-apiserver arguments file has the expected arguments and values tests := []struct { @@ -137,7 +139,7 @@ func TestKubeAPIServer(t *testing.T) { s := mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Setup without proxy to simplify argument list - g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", false, "external", "datastoreurl", "Node,RBAC")).To(BeNil()) + g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", false, types.Datastore{Type: vals.Pointer("external"), ExternalURL: vals.Pointer("datastoreurl")}, "Node,RBAC")).To(BeNil()) g.Expect(snaputil.GetServiceArgument(s, "kube-apiserver", "--etcd-servers")).To(Equal("datastoreurl")) _, err := utils.ParseArgumentFile(path.Join(s.Mock.ServiceArgumentsDir, "kube-apiserver")) @@ -151,7 +153,7 @@ func TestKubeAPIServer(t *testing.T) { s := mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Attempt to configure kube-apiserver with an unsupported datastore - err := setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", false, "unsupported-datastore", "datastoreurl", "Node,RBAC") + err := setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", false, types.Datastore{Type: vals.Pointer("unsupported")}, "Node,RBAC") g.Expect(err).To(HaveOccurred()) g.Expect(err).To(MatchError(ContainSubstring("unsupported datastore"))) }) diff --git a/src/k8s/pkg/k8sd/types/cluster_config.go b/src/k8s/pkg/k8sd/types/cluster_config.go index eec9a3a4a..56900f1eb 100644 --- a/src/k8s/pkg/k8sd/types/cluster_config.go +++ b/src/k8s/pkg/k8sd/types/cluster_config.go @@ -1,298 +1,21 @@ package types -import ( - "fmt" - "net" - "strings" - - apiv1 "github.com/canonical/k8s/api/v1" - "github.com/canonical/k8s/pkg/utils/vals" -) - -// ClusterConfig is the control plane configuration format of the k8s cluster. -// ClusterConfig should attempt to use structured fields wherever possible. 
type ClusterConfig struct { - Network Network `yaml:"network"` - Certificates Certificates `yaml:"certificates"` - Kubelet Kubelet `yaml:"kubelet"` - K8sDqlite K8sDqlite `yaml:"k8s-dqlite"` - APIServer APIServer `yaml:"apiserver"` - DNS DNS `yaml:"dns"` - Ingress Ingress `yaml:"ingress"` - LoadBalancer LoadBalancer `yaml:"load-balancer"` - LocalStorage LocalStorage `yaml:"local-storage"` - Gateway Gateway `yaml:"gateway"` - MetricsServer MetricsServer `yaml:"metrics-server"` - Containerd Containerd `yaml:"containerd"` -} - -type Network struct { - Enabled *bool `yaml:"enabled,omitempty"` - PodCIDR string `yaml:"pod-cidr,omitempty"` - ServiceCIDR string `yaml:"svc-cidr,omitempty"` -} - -type Certificates struct { - CACert string `yaml:"ca-crt,omitempty"` - CAKey string `yaml:"ca-key,omitempty"` - APIServerKubeletClientCert string `yaml:"apiserver-kubelet-client-crt,omitempty"` - APIServerKubeletClientKey string `yaml:"apiserver-kubelet-client-key,omitempty"` - K8sDqliteCert string `yaml:"k8s-dqlite-crt,omitempty"` - K8sDqliteKey string `yaml:"k8s-dqlite-key,omitempty"` - - DatastoreCACert string `yaml:"datastore-ca-crt,omitempty"` - DatastoreClientCert string `yaml:"datastore-client-crt,omitempty"` - DatastoreClientKey string `yaml:"datastore-client-key,omitempty"` - - FrontProxyCACert string `yaml:"front-proxy-ca-crt,omitempty"` - FrontProxyCAKey string `yaml:"front-proxy-ca-key,omitempty"` -} - -type Kubelet struct { - CloudProvider string `yaml:"cloud-provider,omitempty"` - ClusterDNS string `yaml:"cluster-dns,omitempty"` - ClusterDomain string `yaml:"cluster-domain,omitempty"` -} - -type APIServer struct { - SecurePort int `yaml:"secure-port,omitempty"` - AuthorizationMode string `yaml:"authorization-mode,omitempty"` - ServiceAccountKey string `yaml:"service-account-key,omitempty"` - Datastore string `yaml:"datastore,omitempty"` - DatastoreURL string `yaml:"datastore-url,omitempty"` -} - -type K8sDqlite struct { - Port int `yaml:"port,omitempty"` -} - -type DNS struct { - Enabled *bool `yaml:"enabled,omitempty"` - UpstreamNameservers []string `yaml:"upstream-nameservers,omitempty"` -} - -type Ingress struct { - Enabled *bool `yaml:"enabled,omitempty"` - DefaultTLSSecret string `yaml:"default-tls-secret,omitempty"` - EnableProxyProtocol *bool `yaml:"enable-proxy-protocol,omitempty"` -} - -type LoadBalancer struct { - Enabled *bool `yaml:"enabled,omitempty"` - CIDRs []string `yaml:"cidrs,omitempty"` - L2Enabled *bool `yaml:"l2-mode,omitempty"` - L2Interfaces []string `yaml:"l2-interfaces,omitempty"` - BGPEnabled *bool `yaml:"bgp-mode,omitempty"` - BGPLocalASN int `yaml:"bgp-local-asn,omitempty"` - BGPPeerAddress string `yaml:"bgp-peer-address,omitempty"` - BGPPeerASN int `yaml:"bgp-peer-asn,omitempty"` - BGPPeerPort int `yaml:"bgp-peer-port,omitempty"` -} - -type LocalStorage struct { - Enabled *bool `yaml:"enabled,omitempty"` - LocalPath string `yaml:"local-path,omitempty"` - ReclaimPolicy string `yaml:"reclaim-policy,omitempty"` - SetDefault *bool `yaml:"set-default,omitempty"` -} - -type Gateway struct { - Enabled *bool `yaml:"enabled,omitempty"` -} - -type MetricsServer struct { - Enabled *bool `yaml:"enabled,omitempty"` -} - -type Containerd struct { - Registries []ContainerdRegistry `yaml:"registries,omitempty"` -} - -type ContainerdRegistry struct { - Host string `yaml:"host"` - URLs []string `yaml:"urls"` - Username string `yaml:"username,omitempty"` - Password string `yaml:"password,omitempty"` - Token string `yaml:"token,omitempty"` - OverridePath bool 
`yaml:"overridePath,omitempty"` - SkipVerify bool `yaml:"skipVerify,omitempty"` - // TODO(neoaggelos): add option to configure certificates for containerd registries - // CA string `yaml:"ca,omitempty"` - // Cert string `yaml:"cert,omitempty"` - // Key string `yaml:"key,omitempty"` -} - -func (c *ClusterConfig) Validate() error { - clusterCIDRs := strings.Split(c.Network.PodCIDR, ",") - if len(clusterCIDRs) != 1 && len(clusterCIDRs) != 2 { - return fmt.Errorf("invalid number of cluster CIDRs: %d", len(clusterCIDRs)) - } - serviceCIDRs := strings.Split(c.Network.ServiceCIDR, ",") - if len(serviceCIDRs) != 1 && len(serviceCIDRs) != 2 { - return fmt.Errorf("invalid number of service CIDRs: %d", len(serviceCIDRs)) - } - - for _, cidr := range append(clusterCIDRs, serviceCIDRs...) { - _, _, err := net.ParseCIDR(cidr) - if err != nil { - return fmt.Errorf("invalid CIDR: %w", err) - } - } - - return nil -} - -func (c *ClusterConfig) SetDefaults() { - if c.Network.PodCIDR == "" { - c.Network.PodCIDR = "10.1.0.0/16" - } - if c.Network.ServiceCIDR == "" { - c.Network.ServiceCIDR = "10.152.183.0/24" - } - if c.APIServer.SecurePort == 0 { - c.APIServer.SecurePort = 6443 - } - if c.APIServer.AuthorizationMode == "" { - c.APIServer.AuthorizationMode = "Node,RBAC" - } - if c.K8sDqlite.Port == 0 { - c.K8sDqlite.Port = 9000 - } - if c.DNS.UpstreamNameservers == nil { - c.DNS.UpstreamNameservers = []string{"/etc/resolv.conf"} - } - if c.Kubelet.ClusterDomain == "" { - c.Kubelet.ClusterDomain = "cluster.local" - } - if c.LocalStorage.LocalPath == "" { - c.LocalStorage.LocalPath = "/var/snap/k8s/common/rawfile-storage" - } - if c.LocalStorage.ReclaimPolicy == "" { - c.LocalStorage.ReclaimPolicy = "Delete" - } - if c.LocalStorage.SetDefault == nil { - c.LocalStorage.SetDefault = vals.Pointer(true) - } - if c.LoadBalancer.L2Enabled == nil { - c.LoadBalancer.L2Enabled = vals.Pointer(true) - } -} - -// ClusterConfigFromBootstrapConfig extracts the cluster config parts from the BootstrapConfig -// and maps them to a ClusterConfig. 
-func ClusterConfigFromBootstrapConfig(b *apiv1.BootstrapConfig) ClusterConfig { - authzMode := "Node,RBAC" - // Only disable rbac if explicitly set to false during bootstrap - if v := b.EnableRBAC; v != nil && !*v { - authzMode = "AlwaysAllow" - } - - config := ClusterConfig{ - Certificates: Certificates{ - DatastoreCACert: b.DatastoreCACert, - DatastoreClientCert: b.DatastoreClientCert, - DatastoreClientKey: b.DatastoreClientKey, - }, - APIServer: APIServer{ - AuthorizationMode: authzMode, - Datastore: b.Datastore, - DatastoreURL: b.DatastoreURL, - }, - Network: Network{ - PodCIDR: b.ClusterCIDR, - ServiceCIDR: b.ServiceCIDR, - }, - K8sDqlite: K8sDqlite{ - Port: b.K8sDqlitePort, - }, - } - - for _, component := range b.Components { - switch component { - case "network": - config.Network.Enabled = vals.Pointer(true) - case "dns": - config.DNS.Enabled = vals.Pointer(true) - case "local-storage": - config.LocalStorage.Enabled = vals.Pointer(true) - case "ingress": - config.Ingress.Enabled = vals.Pointer(true) - case "gateway": - config.Gateway.Enabled = vals.Pointer(true) - case "metrics-server": - config.MetricsServer.Enabled = vals.Pointer(true) - case "load-balancer": - config.LoadBalancer.Enabled = vals.Pointer(true) - } - } - - return config -} - -func ClusterConfigFromUserFacing(ufConfig *apiv1.UserFacingClusterConfig) ClusterConfig { - config := ClusterConfig{} - - if ufConfig.DNS != nil { - config.Kubelet = Kubelet{ - ClusterDNS: ufConfig.DNS.ServiceIP, - ClusterDomain: ufConfig.DNS.ClusterDomain, - } - - config.DNS = DNS{ - Enabled: ufConfig.DNS.Enabled, - UpstreamNameservers: ufConfig.DNS.UpstreamNameservers, - } - } - - if ufConfig.Network != nil { - config.Network = Network{ - Enabled: ufConfig.Network.Enabled, - } - } - - if ufConfig.Ingress != nil { - config.Ingress = Ingress{ - Enabled: ufConfig.Ingress.Enabled, - DefaultTLSSecret: ufConfig.Ingress.DefaultTLSSecret, - EnableProxyProtocol: ufConfig.Ingress.EnableProxyProtocol, - } - } - - if ufConfig.LoadBalancer != nil { - // TODO(berkayoz): make sure everything about bgp to be set if bgp enabled - config.LoadBalancer = LoadBalancer{ - Enabled: ufConfig.LoadBalancer.Enabled, - CIDRs: ufConfig.LoadBalancer.CIDRs, - L2Enabled: ufConfig.LoadBalancer.L2Enabled, - L2Interfaces: ufConfig.LoadBalancer.L2Interfaces, - BGPEnabled: ufConfig.LoadBalancer.BGPEnabled, - BGPLocalASN: ufConfig.LoadBalancer.BGPLocalASN, - BGPPeerAddress: ufConfig.LoadBalancer.BGPPeerAddress, - BGPPeerASN: ufConfig.LoadBalancer.BGPPeerASN, - BGPPeerPort: ufConfig.LoadBalancer.BGPPeerPort, - } - } - - if ufConfig.LocalStorage != nil { - config.LocalStorage = LocalStorage{ - Enabled: ufConfig.LocalStorage.Enabled, - LocalPath: ufConfig.LocalStorage.LocalPath, - ReclaimPolicy: ufConfig.LocalStorage.ReclaimPolicy, - SetDefault: ufConfig.LocalStorage.SetDefault, - } - } - - if ufConfig.Gateway != nil { - config.Gateway = Gateway{ - Enabled: ufConfig.Gateway.Enabled, - } - } - - if ufConfig.MetricsServer != nil { - config.MetricsServer = MetricsServer{ - Enabled: ufConfig.MetricsServer.Enabled, - } - } - - return config + Certificates Certificates `json:"certificates,omitempty"` + Datastore Datastore `json:"datastore,omitempty"` + APIServer APIServer `json:"apiserver,omitempty"` + Kubelet Kubelet `json:"kubelet,omitempty"` + Containerd Containerd `json:"containerd,omitempty"` + + Network Network `json:"network,omitempty"` + DNS DNS `json:"dns,omitempty"` + Ingress Ingress `json:"ingress,omitempty"` + LoadBalancer LoadBalancer `json:"load-balancer,omitempty"` + 
Gateway Gateway `json:"gateway,omitempty"` + LocalStorage LocalStorage `json:"local-storage,omitempty"` + MetricsServer MetricsServer `json:"metrics-server,omitempty"` +} + +func (c ClusterConfig) Empty() bool { + return c.Certificates.Empty() && c.Datastore.Empty() && c.Network.Empty() && c.APIServer.Empty() && c.Kubelet.Empty() && c.Network.Empty() && c.DNS.Empty() && c.Ingress.Empty() && c.LoadBalancer.Empty() && c.Gateway.Empty() && c.LocalStorage.Empty() && c.MetricsServer.Empty() } diff --git a/src/k8s/pkg/k8sd/types/cluster_config_apiserver.go b/src/k8s/pkg/k8sd/types/cluster_config_apiserver.go new file mode 100644 index 000000000..bd76536ba --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_apiserver.go @@ -0,0 +1,10 @@ +package types + +type APIServer struct { + SecurePort *int `json:"port,omitempty"` + AuthorizationMode *string `json:"authorization-mode,omitempty"` +} + +func (c APIServer) GetSecurePort() int { return getField(c.SecurePort) } +func (c APIServer) GetAuthorizationMode() string { return getField(c.AuthorizationMode) } +func (c APIServer) Empty() bool { return c.SecurePort == nil && c.AuthorizationMode == nil } diff --git a/src/k8s/pkg/k8sd/types/cluster_config_certificates.go b/src/k8s/pkg/k8sd/types/cluster_config_certificates.go new file mode 100644 index 000000000..f2e78b134 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_certificates.go @@ -0,0 +1,26 @@ +package types + +type Certificates struct { + CACert *string `json:"ca-crt,omitempty"` + CAKey *string `json:"ca-key,omitempty"` + FrontProxyCACert *string `json:"front-proxy-ca-crt,omitempty"` + FrontProxyCAKey *string `json:"front-proxy-ca-key,omitempty"` + ServiceAccountKey *string `json:"service-account-key,omitempty"` + APIServerKubeletClientCert *string `json:"apiserver-to-kubelet-client-crt,omitempty"` + APIServerKubeletClientKey *string `json:"apiserver-to-kubelet-client-key,omitempty"` +} + +func (c Certificates) GetCACert() string { return getField(c.CACert) } +func (c Certificates) GetCAKey() string { return getField(c.CAKey) } +func (c Certificates) GetFrontProxyCACert() string { return getField(c.FrontProxyCACert) } +func (c Certificates) GetFrontProxyCAKey() string { return getField(c.FrontProxyCAKey) } +func (c Certificates) GetServiceAccountKey() string { return getField(c.ServiceAccountKey) } +func (c Certificates) GetAPIServerKubeletClientCert() string { + return getField(c.APIServerKubeletClientCert) +} +func (c Certificates) GetAPIServerKubeletClientKey() string { + return getField(c.APIServerKubeletClientKey) +} +func (c Certificates) Empty() bool { + return c.CACert == nil && c.CAKey == nil && c.FrontProxyCACert == nil && c.FrontProxyCAKey == nil && c.ServiceAccountKey == nil && c.APIServerKubeletClientCert == nil && c.APIServerKubeletClientKey == nil +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_containerd.go b/src/k8s/pkg/k8sd/types/cluster_config_containerd.go new file mode 100644 index 000000000..6fa79ebb3 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_containerd.go @@ -0,0 +1,19 @@ +package types + +type ContainerdRegistry struct { + Host string `json:"host,omitempty"` + URLs []string `json:"urls,omitempty"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Token string `json:"token,omitempty"` + OverridePath bool `json:"override-path,omitempty"` + SkipVerify bool `json:"skip-verify,omitempty"` + // TODO(neoaggelos): add option to configure certificates for containerd registries + // CACert string + // 
ClientCert string + // ClientKey string +} + +type Containerd struct { + Registries *[]ContainerdRegistry `json:"registries,omitempty"` +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_convert.go b/src/k8s/pkg/k8sd/types/cluster_config_convert.go new file mode 100644 index 000000000..90d064a56 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_convert.go @@ -0,0 +1,157 @@ +package types + +import ( + "fmt" + "strings" + + apiv1 "github.com/canonical/k8s/api/v1" + "github.com/canonical/k8s/pkg/utils/vals" +) + +// ClusterConfigFromBootstrapConfig converts BootstrapConfig from public API into a ClusterConfig. +func ClusterConfigFromBootstrapConfig(b apiv1.BootstrapConfig) (ClusterConfig, error) { + config := ClusterConfigFromUserFacing(b.ClusterConfig) + + // APIServer + config.APIServer.SecurePort = b.SecurePort + if b.DisableRBAC != nil && *b.DisableRBAC { + config.APIServer.AuthorizationMode = vals.Pointer("AlwaysAllow") + } else { + config.APIServer.AuthorizationMode = vals.Pointer("Node,RBAC") + } + + // Datastore + switch b.GetDatastoreType() { + case "", "k8s-dqlite": + if len(b.DatastoreServers) > 0 { + return ClusterConfig{}, fmt.Errorf("datastore-servers needs datastore-type to be external, not %q", b.GetDatastoreType()) + } + if b.GetDatastoreCACert() != "" { + return ClusterConfig{}, fmt.Errorf("datastore-ca-crt needs datastore-type to be external, not %q", b.GetDatastoreType()) + } + if b.GetDatastoreClientCert() != "" { + return ClusterConfig{}, fmt.Errorf("datastore-client-crt needs datastore-type to be external, not %q", b.GetDatastoreType()) + } + if b.GetDatastoreClientKey() != "" { + return ClusterConfig{}, fmt.Errorf("datastore-client-key needs datastore-type to be external, not %q", b.GetDatastoreType()) + } + + config.Datastore = Datastore{ + Type: vals.Pointer("k8s-dqlite"), + K8sDqlitePort: b.K8sDqlitePort, + } + case "external": + if len(b.DatastoreServers) == 0 { + return ClusterConfig{}, fmt.Errorf("datastore type is external but no datastore servers were set") + } + if b.GetK8sDqlitePort() != 0 { + return ClusterConfig{}, fmt.Errorf("k8s-dqlite-port needs datastore-type to be k8s-dqlite") + } + config.Datastore = Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer(strings.Join(b.DatastoreServers, ",")), + ExternalCACert: b.DatastoreCACert, + ExternalClientCert: b.DatastoreClientCert, + ExternalClientKey: b.DatastoreClientKey, + } + default: + return ClusterConfig{}, fmt.Errorf("unknown datastore type specified in bootstrap config %q", b.GetDatastoreType()) + } + + // Network + config.Network.PodCIDR = b.PodCIDR + config.Network.ServiceCIDR = b.ServiceCIDR + + // Kubelet + config.Kubelet.CloudProvider = b.CloudProvider + + return config, nil +} + +// ClusterConfigFromUserFacing converts UserFacingClusterConfig from public API into a ClusterConfig. 
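The new accessor methods throughout these files (`GetCACert`, `GetType`, `GetSecurePort`, and so on) all defer to a `getField` helper that is not shown in this diff. A minimal generic implementation consistent with that usage would be:

```go
// getField is not part of this diff; this is a minimal sketch of a generic
// helper consistent with how the Get* accessors use it: dereference the
// pointer if it is set, otherwise return the zero value of the field's type.
func getField[T any](val *T) T {
	if val != nil {
		return *val
	}
	var zero T
	return zero
}
```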
+func ClusterConfigFromUserFacing(u apiv1.UserFacingClusterConfig) ClusterConfig { + return ClusterConfig{ + Kubelet: Kubelet{ + ClusterDNS: u.DNS.ServiceIP, + ClusterDomain: u.DNS.ClusterDomain, + }, + Network: Network{ + Enabled: u.Network.Enabled, + }, + DNS: DNS{ + Enabled: u.DNS.Enabled, + UpstreamNameservers: u.DNS.UpstreamNameservers, + }, + Ingress: Ingress{ + Enabled: u.Ingress.Enabled, + DefaultTLSSecret: u.Ingress.DefaultTLSSecret, + EnableProxyProtocol: u.Ingress.EnableProxyProtocol, + }, + LoadBalancer: LoadBalancer{ + Enabled: u.LoadBalancer.Enabled, + CIDRs: u.LoadBalancer.CIDRs, + L2Mode: u.LoadBalancer.L2Mode, + L2Interfaces: u.LoadBalancer.L2Interfaces, + BGPMode: u.LoadBalancer.BGPMode, + BGPLocalASN: u.LoadBalancer.BGPLocalASN, + BGPPeerAddress: u.LoadBalancer.BGPPeerAddress, + BGPPeerASN: u.LoadBalancer.BGPPeerASN, + BGPPeerPort: u.LoadBalancer.BGPPeerPort, + }, + LocalStorage: LocalStorage{ + Enabled: u.LocalStorage.Enabled, + LocalPath: u.LocalStorage.LocalPath, + ReclaimPolicy: u.LocalStorage.ReclaimPolicy, + SetDefault: u.LocalStorage.SetDefault, + }, + MetricsServer: MetricsServer{ + Enabled: u.MetricsServer.Enabled, + }, + Gateway: Gateway{ + Enabled: u.Gateway.Enabled, + }, + } +} + +// ToUserFacing converts a ClusterConfig to a UserFacingClusterConfig from the public API. +func (c ClusterConfig) ToUserFacing() apiv1.UserFacingClusterConfig { + return apiv1.UserFacingClusterConfig{ + Network: apiv1.NetworkConfig{ + Enabled: c.Network.Enabled, + }, + DNS: apiv1.DNSConfig{ + Enabled: c.DNS.Enabled, + ClusterDomain: c.Kubelet.ClusterDomain, + ServiceIP: c.Kubelet.ClusterDNS, + UpstreamNameservers: c.DNS.UpstreamNameservers, + }, + Ingress: apiv1.IngressConfig{ + Enabled: c.Ingress.Enabled, + DefaultTLSSecret: c.Ingress.DefaultTLSSecret, + EnableProxyProtocol: c.Ingress.EnableProxyProtocol, + }, + LoadBalancer: apiv1.LoadBalancerConfig{ + Enabled: c.LoadBalancer.Enabled, + CIDRs: c.LoadBalancer.CIDRs, + L2Mode: c.LoadBalancer.L2Mode, + L2Interfaces: c.LoadBalancer.L2Interfaces, + BGPMode: c.LoadBalancer.BGPMode, + BGPLocalASN: c.LoadBalancer.BGPLocalASN, + BGPPeerAddress: c.LoadBalancer.BGPPeerAddress, + BGPPeerASN: c.LoadBalancer.BGPPeerASN, + BGPPeerPort: c.LoadBalancer.BGPPeerPort, + }, + LocalStorage: apiv1.LocalStorageConfig{ + Enabled: c.LocalStorage.Enabled, + LocalPath: c.LocalStorage.LocalPath, + ReclaimPolicy: c.LocalStorage.ReclaimPolicy, + SetDefault: c.LocalStorage.SetDefault, + }, + MetricsServer: apiv1.MetricsServerConfig{ + Enabled: c.MetricsServer.Enabled, + }, + Gateway: apiv1.GatewayConfig{ + Enabled: c.Gateway.Enabled, + }, + } +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_convert_test.go b/src/k8s/pkg/k8sd/types/cluster_config_convert_test.go new file mode 100644 index 000000000..976a9198c --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_convert_test.go @@ -0,0 +1,235 @@ +package types_test + +import ( + "testing" + + apiv1 "github.com/canonical/k8s/api/v1" + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/utils/vals" + . 
"github.com/onsi/gomega" +) + +func TestClusterConfigFromBootstrapConfig(t *testing.T) { + for _, tc := range []struct { + name string + bootstrap apiv1.BootstrapConfig + expectConfig types.ClusterConfig + }{ + { + name: "Nil", + expectConfig: types.ClusterConfig{ + APIServer: types.APIServer{ + AuthorizationMode: vals.Pointer("Node,RBAC"), + }, + Datastore: types.Datastore{ + Type: vals.Pointer("k8s-dqlite"), + }, + }, + }, + { + name: "DisableRBAC", + bootstrap: apiv1.BootstrapConfig{ + DisableRBAC: vals.Pointer(true), + }, + expectConfig: types.ClusterConfig{ + APIServer: types.APIServer{ + AuthorizationMode: vals.Pointer("AlwaysAllow"), + }, + Datastore: types.Datastore{ + Type: vals.Pointer("k8s-dqlite"), + }, + }, + }, + { + name: "K8sDqliteDefault", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer(""), + }, + expectConfig: types.ClusterConfig{ + APIServer: types.APIServer{ + AuthorizationMode: vals.Pointer("Node,RBAC"), + }, + Datastore: types.Datastore{ + Type: vals.Pointer("k8s-dqlite"), + }, + }, + }, + { + name: "ExternalDatastore", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer("external"), + DatastoreServers: []string{"https://10.0.0.1:2379", "https://10.0.0.2:2379"}, + DatastoreCACert: vals.Pointer("CA DATA"), + DatastoreClientCert: vals.Pointer("CERT DATA"), + DatastoreClientKey: vals.Pointer("KEY DATA"), + }, + expectConfig: types.ClusterConfig{ + APIServer: types.APIServer{ + AuthorizationMode: vals.Pointer("Node,RBAC"), + }, + Datastore: types.Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer("https://10.0.0.1:2379,https://10.0.0.2:2379"), + ExternalCACert: vals.Pointer("CA DATA"), + ExternalClientCert: vals.Pointer("CERT DATA"), + ExternalClientKey: vals.Pointer("KEY DATA"), + }, + }, + }, + { + name: "Full", + bootstrap: apiv1.BootstrapConfig{ + ClusterConfig: apiv1.UserFacingClusterConfig{ + Network: apiv1.NetworkConfig{ + Enabled: vals.Pointer(true), + }, + DNS: apiv1.DNSConfig{ + Enabled: vals.Pointer(true), + ClusterDomain: vals.Pointer("cluster.local"), + }, + Ingress: apiv1.IngressConfig{ + Enabled: vals.Pointer(true), + }, + LoadBalancer: apiv1.LoadBalancerConfig{ + Enabled: vals.Pointer(true), + L2Mode: vals.Pointer(true), + CIDRs: vals.Pointer([]string{"10.0.0.0/24"}), + }, + LocalStorage: apiv1.LocalStorageConfig{ + Enabled: vals.Pointer(true), + LocalPath: vals.Pointer("/storage/path"), + SetDefault: vals.Pointer(false), + }, + Gateway: apiv1.GatewayConfig{ + Enabled: vals.Pointer(true), + }, + MetricsServer: apiv1.MetricsServerConfig{ + Enabled: vals.Pointer(true), + }, + }, + PodCIDR: vals.Pointer("10.100.0.0/16"), + ServiceCIDR: vals.Pointer("10.200.0.0/16"), + DisableRBAC: vals.Pointer(false), + SecurePort: vals.Pointer(6443), + CloudProvider: vals.Pointer("external"), + K8sDqlitePort: vals.Pointer(9090), + DatastoreType: vals.Pointer("k8s-dqlite"), + ExtraSANs: []string{"custom.kubernetes"}, + }, + expectConfig: types.ClusterConfig{ + Datastore: types.Datastore{ + Type: vals.Pointer("k8s-dqlite"), + K8sDqlitePort: vals.Pointer(9090), + }, + APIServer: types.APIServer{ + SecurePort: vals.Pointer(6443), + AuthorizationMode: vals.Pointer("Node,RBAC"), + }, + Kubelet: types.Kubelet{ + ClusterDomain: vals.Pointer("cluster.local"), + CloudProvider: vals.Pointer("external"), + }, + Network: types.Network{ + Enabled: vals.Pointer(true), + PodCIDR: vals.Pointer("10.100.0.0/16"), + ServiceCIDR: vals.Pointer("10.200.0.0/16"), + }, + DNS: types.DNS{ + Enabled: vals.Pointer(true), + }, + Ingress: 
types.Ingress{ + Enabled: vals.Pointer(true), + }, + LoadBalancer: types.LoadBalancer{ + Enabled: vals.Pointer(true), + L2Mode: vals.Pointer(true), + CIDRs: vals.Pointer([]string{"10.0.0.0/24"}), + }, + LocalStorage: types.LocalStorage{ + Enabled: vals.Pointer(true), + LocalPath: vals.Pointer("/storage/path"), + SetDefault: vals.Pointer(false), + }, + Gateway: types.Gateway{ + Enabled: vals.Pointer(true), + }, + MetricsServer: types.MetricsServer{ + Enabled: vals.Pointer(true), + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + config, err := types.ClusterConfigFromBootstrapConfig(tc.bootstrap) + g.Expect(err).To(BeNil()) + g.Expect(config).To(Equal(tc.expectConfig)) + }) + } + + t.Run("Invalid", func(t *testing.T) { + for _, tc := range []struct { + name string + bootstrap apiv1.BootstrapConfig + }{ + { + name: "K8sDqliteWithExternalServers", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer(""), + DatastoreServers: []string{"http://10.0.0.1:2379"}, + }, + }, + { + name: "K8sDqliteWithExternalCA", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer(""), + DatastoreCACert: vals.Pointer("CA DATA"), + }, + }, + { + name: "K8sDqliteWithExternalClientCert", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer(""), + DatastoreClientCert: vals.Pointer("CERT DATA"), + }, + }, + { + name: "K8sDqliteWithExternalClientKey", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer(""), + DatastoreClientKey: vals.Pointer("KEY DATA"), + }, + }, + { + name: "ExternalWithK8sDqlitePort", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer("external"), + DatastoreServers: []string{"http://10.0.0.1:2379"}, + K8sDqlitePort: vals.Pointer(18080), + }, + }, + { + name: "ExternalWithoutServers", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer("external"), + }, + }, + { + name: "UnsupportedDatastore", + bootstrap: apiv1.BootstrapConfig{ + DatastoreType: vals.Pointer("unknown"), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + config, err := types.ClusterConfigFromBootstrapConfig(tc.bootstrap) + g.Expect(config).To(BeZero()) + g.Expect(err).To(HaveOccurred()) + }) + } + + }) +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_datastore.go b/src/k8s/pkg/k8sd/types/cluster_config_datastore.go new file mode 100644 index 000000000..f90617a3d --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_datastore.go @@ -0,0 +1,73 @@ +package types + +import ( + "fmt" + "path" +) + +type Datastore struct { + Type *string `json:"type,omitempty"` + + K8sDqlitePort *int `json:"k8s-dqlite-port,omitempty"` + K8sDqliteCert *string `json:"k8s-dqlite-crt,omitempty"` + K8sDqliteKey *string `json:"k8s-dqlite-key,omitempty"` + + ExternalURL *string `json:"external-url,omitempty"` + ExternalCACert *string `json:"external-ca-crt,omitempty"` + ExternalClientCert *string `json:"external-client-crt,omitempty"` + ExternalClientKey *string `json:"external-client-key,omitempty"` +} + +func (c Datastore) GetType() string { return getField(c.Type) } +func (c Datastore) GetK8sDqlitePort() int { return getField(c.K8sDqlitePort) } +func (c Datastore) GetK8sDqliteCert() string { return getField(c.K8sDqliteCert) } +func (c Datastore) GetK8sDqliteKey() string { return getField(c.K8sDqliteKey) } +func (c Datastore) GetExternalURL() string { return getField(c.ExternalURL) } +func (c Datastore) GetExternalCACert() string { return getField(c.ExternalCACert) } +func (c Datastore) GetExternalClientCert() string { 
return getField(c.ExternalClientCert) } +func (c Datastore) GetExternalClientKey() string { return getField(c.ExternalClientKey) } +func (c Datastore) Empty() bool { + return c.Type == nil && c.K8sDqlitePort == nil && c.K8sDqliteCert == nil && c.K8sDqliteKey == nil && c.ExternalURL == nil && c.ExternalCACert == nil && c.ExternalClientCert == nil && c.ExternalClientKey == nil +} + +// DatastorePathsProvider is to avoid circular dependency for snap.Snap in Datastore.ToKubeAPIServerArguments() +type DatastorePathsProvider interface { + K8sDqliteStateDir() string + EtcdPKIDir() string +} + +// ToKubeAPIServerArguments returns updateArgs, deleteArgs that can be used with snaputil.UpdateServiceArguments() for the kube-apiserver +// according the datastore configuration. +func (c Datastore) ToKubeAPIServerArguments(p DatastorePathsProvider) (map[string]string, []string) { + var ( + updateArgs = make(map[string]string) + deleteArgs []string + ) + + switch c.GetType() { + case "k8s-dqlite": + updateArgs["--etcd-servers"] = fmt.Sprintf("unix://%s", path.Join(p.K8sDqliteStateDir(), "k8s-dqlite.sock")) + deleteArgs = []string{"--etcd-cafile", "--etcd-certfile", "--etcd-keyfile"} + case "external": + updateArgs["--etcd-servers"] = c.GetExternalURL() + + // the certificates will be written by setup.EnsureExtDatastorePKI(), here we only set the paths + for _, loop := range []struct { + arg string + cert string + path string + }{ + {cert: c.GetExternalCACert(), arg: "--etcd-cafile", path: "ca.crt"}, + {cert: c.GetExternalClientCert(), arg: "--etcd-certfile", path: "client.crt"}, + {cert: c.GetExternalClientKey(), arg: "--etcd-keyfile", path: "client.key"}, + } { + if loop.cert != "" { + updateArgs[loop.arg] = path.Join(p.EtcdPKIDir(), loop.path) + } else { + deleteArgs = append(deleteArgs, loop.arg) + } + } + } + + return updateArgs, deleteArgs +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_datastore_test.go b/src/k8s/pkg/k8sd/types/cluster_config_datastore_test.go new file mode 100644 index 000000000..3f402a030 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_datastore_test.go @@ -0,0 +1,78 @@ +package types_test + +import ( + "testing" + + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/snap/mock" + "github.com/canonical/k8s/pkg/utils/vals" + . 
"github.com/onsi/gomega" +) + +func TestDatastoreToKubeAPIServerArguments(t *testing.T) { + snap := &mock.Snap{ + Mock: mock.Mock{ + K8sDqliteStateDir: "/k8s-dqlite", + EtcdPKIDir: "/pki/etcd", + }, + } + + for _, tc := range []struct { + name string + config types.Datastore + expectUpdateArgs map[string]string + expectDeleteArgs []string + }{ + { + name: "Nil", + expectUpdateArgs: map[string]string{}, + }, + { + name: "K8sDqlite", + config: types.Datastore{ + Type: vals.Pointer("k8s-dqlite"), + }, + expectUpdateArgs: map[string]string{ + "--etcd-servers": "unix:///k8s-dqlite/k8s-dqlite.sock", + }, + expectDeleteArgs: []string{"--etcd-cafile", "--etcd-certfile", "--etcd-keyfile"}, + }, + { + name: "ExternalFull", + config: types.Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer("https://10.0.0.10:2379,https://10.0.0.11:2379"), + ExternalCACert: vals.Pointer("data"), + ExternalClientCert: vals.Pointer("data"), + ExternalClientKey: vals.Pointer("data"), + }, + expectUpdateArgs: map[string]string{ + "--etcd-servers": "https://10.0.0.10:2379,https://10.0.0.11:2379", + "--etcd-cafile": "/pki/etcd/ca.crt", + "--etcd-certfile": "/pki/etcd/client.crt", + "--etcd-keyfile": "/pki/etcd/client.key", + }, + }, + { + name: "ExternalOnlyCA", + config: types.Datastore{ + Type: vals.Pointer("external"), + ExternalURL: vals.Pointer("https://10.0.0.10:2379,https://10.0.0.11:2379"), + ExternalCACert: vals.Pointer("data"), + }, + expectUpdateArgs: map[string]string{ + "--etcd-servers": "https://10.0.0.10:2379,https://10.0.0.11:2379", + "--etcd-cafile": "/pki/etcd/ca.crt", + }, + expectDeleteArgs: []string{"--etcd-certfile", "--etcd-keyfile"}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + update, delete := tc.config.ToKubeAPIServerArguments(snap) + g.Expect(update).To(Equal(tc.expectUpdateArgs)) + g.Expect(delete).To(Equal(tc.expectDeleteArgs)) + }) + } +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_defaults.go b/src/k8s/pkg/k8sd/types/cluster_config_defaults.go new file mode 100644 index 000000000..fb97c4594 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_defaults.go @@ -0,0 +1,100 @@ +package types + +import "github.com/canonical/k8s/pkg/utils/vals" + +func (c *ClusterConfig) SetDefaults() { + // networking + if c.Network.Enabled == nil { + c.Network.Enabled = vals.Pointer(false) + } + if c.Network.GetPodCIDR() == "" { + c.Network.PodCIDR = vals.Pointer("10.1.0.0/16") + } + if c.Network.GetServiceCIDR() == "" { + c.Network.ServiceCIDR = vals.Pointer("10.152.183.0/24") + } + // kube-apiserver + if c.APIServer.GetSecurePort() == 0 { + c.APIServer.SecurePort = vals.Pointer(6443) + } + if c.APIServer.GetAuthorizationMode() == "" { + c.APIServer.AuthorizationMode = vals.Pointer("Node,RBAC") + } + // datastore + if c.Datastore.GetType() == "" { + c.Datastore.Type = vals.Pointer("k8s-dqlite") + } + if c.Datastore.GetK8sDqlitePort() == 0 { + c.Datastore.K8sDqlitePort = vals.Pointer(9000) + } + // kubelet + if c.Kubelet.GetClusterDomain() == "" { + c.Kubelet.ClusterDomain = vals.Pointer("cluster.local") + } + // dns + if c.DNS.Enabled == nil { + c.DNS.Enabled = vals.Pointer(false) + } + if len(c.DNS.GetUpstreamNameservers()) == 0 { + c.DNS.UpstreamNameservers = vals.Pointer([]string{"/etc/resolv.conf"}) + } + // local storage + if c.LocalStorage.Enabled == nil { + c.LocalStorage.Enabled = vals.Pointer(false) + } + if c.LocalStorage.GetLocalPath() == "" { + c.LocalStorage.LocalPath = vals.Pointer("/var/snap/k8s/common/rawfile-storage") + } + if 
c.LocalStorage.GetReclaimPolicy() == "" { + c.LocalStorage.ReclaimPolicy = vals.Pointer("Delete") + } + if c.LocalStorage.SetDefault == nil { + c.LocalStorage.SetDefault = vals.Pointer(true) + } + // load balancer + if c.LoadBalancer.Enabled == nil { + c.LoadBalancer.Enabled = vals.Pointer(false) + } + if c.LoadBalancer.CIDRs == nil { + c.LoadBalancer.CIDRs = vals.Pointer([]string{}) + } + if c.LoadBalancer.L2Mode == nil { + c.LoadBalancer.L2Mode = vals.Pointer(false) + } + if c.LoadBalancer.L2Interfaces == nil { + c.LoadBalancer.L2Interfaces = vals.Pointer([]string{}) + } + if c.LoadBalancer.BGPMode == nil { + c.LoadBalancer.BGPMode = vals.Pointer(false) + } + if c.LoadBalancer.BGPLocalASN == nil { + c.LoadBalancer.BGPLocalASN = vals.Pointer(0) + } + if c.LoadBalancer.BGPPeerAddress == nil { + c.LoadBalancer.BGPPeerAddress = vals.Pointer("") + } + if c.LoadBalancer.BGPPeerASN == nil { + c.LoadBalancer.BGPPeerASN = vals.Pointer(0) + } + if c.LoadBalancer.BGPPeerPort == nil { + c.LoadBalancer.BGPPeerPort = vals.Pointer(0) + } + // ingress + if c.Ingress.Enabled == nil { + c.Ingress.Enabled = vals.Pointer(false) + } + if c.Ingress.DefaultTLSSecret == nil { + c.Ingress.DefaultTLSSecret = vals.Pointer("") + } + if c.Ingress.EnableProxyProtocol == nil { + c.Ingress.EnableProxyProtocol = vals.Pointer(false) + } + // gateway + if c.Gateway.Enabled == nil { + c.Gateway.Enabled = vals.Pointer(false) + } + // metrics server + if c.MetricsServer.Enabled == nil { + c.MetricsServer.Enabled = vals.Pointer(false) + } +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_defaults_test.go b/src/k8s/pkg/k8sd/types/cluster_config_defaults_test.go new file mode 100644 index 000000000..e797c3430 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_defaults_test.go @@ -0,0 +1,69 @@ +package types_test + +import ( + "testing" + + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/utils/vals" + . 
"github.com/onsi/gomega" +) + +func TestSetDefaults(t *testing.T) { + g := NewWithT(t) + clusterConfig := types.ClusterConfig{} + + // Set defaults + expectedConfig := types.ClusterConfig{ + Network: types.Network{ + Enabled: vals.Pointer(false), + PodCIDR: vals.Pointer("10.1.0.0/16"), + ServiceCIDR: vals.Pointer("10.152.183.0/24"), + }, + APIServer: types.APIServer{ + SecurePort: vals.Pointer(6443), + AuthorizationMode: vals.Pointer("Node,RBAC"), + }, + Datastore: types.Datastore{ + Type: vals.Pointer("k8s-dqlite"), + K8sDqlitePort: vals.Pointer(9000), + }, + Kubelet: types.Kubelet{ + ClusterDomain: vals.Pointer("cluster.local"), + }, + DNS: types.DNS{ + Enabled: vals.Pointer(false), + UpstreamNameservers: vals.Pointer([]string{"/etc/resolv.conf"}), + }, + LocalStorage: types.LocalStorage{ + Enabled: vals.Pointer(false), + LocalPath: vals.Pointer("/var/snap/k8s/common/rawfile-storage"), + ReclaimPolicy: vals.Pointer("Delete"), + SetDefault: vals.Pointer(true), + }, + LoadBalancer: types.LoadBalancer{ + Enabled: vals.Pointer(false), + CIDRs: vals.Pointer([]string{}), + L2Mode: vals.Pointer(false), + L2Interfaces: vals.Pointer([]string{}), + BGPMode: vals.Pointer(false), + BGPLocalASN: vals.Pointer(0), + BGPPeerAddress: vals.Pointer(""), + BGPPeerASN: vals.Pointer(0), + BGPPeerPort: vals.Pointer(0), + }, + MetricsServer: types.MetricsServer{ + Enabled: vals.Pointer(false), + }, + Gateway: types.Gateway{ + Enabled: vals.Pointer(false), + }, + Ingress: types.Ingress{ + Enabled: vals.Pointer(false), + DefaultTLSSecret: vals.Pointer(""), + EnableProxyProtocol: vals.Pointer(false), + }, + } + + clusterConfig.SetDefaults() + g.Expect(clusterConfig).To(Equal(expectedConfig)) +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_features.go b/src/k8s/pkg/k8sd/types/cluster_config_features.go new file mode 100644 index 000000000..bed0395f7 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_features.go @@ -0,0 +1,77 @@ +package types + +type DNS struct { + Enabled *bool `json:"enabled,omitempty"` + UpstreamNameservers *[]string `json:"upstream-nameservers,omitempty"` +} + +type Ingress struct { + Enabled *bool `json:"enabled,omitempty"` + DefaultTLSSecret *string `json:"default-tls-secret,omitempty"` + EnableProxyProtocol *bool `json:"enable-proxy-protocol,omitempty"` +} + +type LoadBalancer struct { + Enabled *bool `json:"enabled,omitempty"` + CIDRs *[]string `json:"cidrs,omitempty"` + L2Mode *bool `json:"l2-mode,omitempty"` + L2Interfaces *[]string `json:"l2-interfaces,omitempty"` + BGPMode *bool `json:"bgp-mode,omitempty"` + BGPLocalASN *int `json:"bgp-local-asn,omitempty"` + BGPPeerAddress *string `json:"bgp-peer-address,omitempty"` + BGPPeerASN *int `json:"bgp-peer-asn,omitempty"` + BGPPeerPort *int `json:"bgp-peer-port,omitempty"` +} + +type Gateway struct { + Enabled *bool `json:"enabled,omitempty"` +} + +type MetricsServer struct { + Enabled *bool `json:"enabled,omitempty"` +} + +type LocalStorage struct { + Enabled *bool `json:"enabled,omitempty"` + LocalPath *string `json:"local-path,omitempty"` + ReclaimPolicy *string `json:"reclaim-policy,omitempty"` + SetDefault *bool `json:"set-default,omitempty"` +} + +func (c DNS) GetEnabled() bool { return getField(c.Enabled) } +func (c DNS) GetUpstreamNameservers() []string { return getField(c.UpstreamNameservers) } +func (c DNS) Empty() bool { return c.Enabled == nil && c.UpstreamNameservers == nil } + +func (c Ingress) GetEnabled() bool { return getField(c.Enabled) } +func (c Ingress) GetDefaultTLSSecret() string { return 
getField(c.DefaultTLSSecret) } +func (c Ingress) GetEnableProxyProtocol() bool { return getField(c.EnableProxyProtocol) } +func (c Ingress) Empty() bool { + return c.Enabled == nil && c.DefaultTLSSecret == nil && c.EnableProxyProtocol == nil +} + +func (c Gateway) GetEnabled() bool { return getField(c.Enabled) } +func (c Gateway) Empty() bool { return c.Enabled == nil } + +func (c LoadBalancer) GetEnabled() bool { return getField(c.Enabled) } +func (c LoadBalancer) GetCIDRs() []string { return getField(c.CIDRs) } +func (c LoadBalancer) GetL2Mode() bool { return getField(c.L2Mode) } +func (c LoadBalancer) GetL2Interfaces() []string { return getField(c.L2Interfaces) } +func (c LoadBalancer) GetBGPMode() bool { return getField(c.BGPMode) } +func (c LoadBalancer) GetBGPLocalASN() int { return getField(c.BGPLocalASN) } +func (c LoadBalancer) GetBGPPeerAddress() string { return getField(c.BGPPeerAddress) } +func (c LoadBalancer) GetBGPPeerASN() int { return getField(c.BGPPeerASN) } +func (c LoadBalancer) GetBGPPeerPort() int { return getField(c.BGPPeerPort) } +func (c LoadBalancer) Empty() bool { + return c.Enabled == nil && c.CIDRs == nil && c.L2Mode == nil && c.L2Interfaces == nil && c.BGPMode == nil && c.BGPLocalASN == nil && c.BGPPeerAddress == nil && c.BGPPeerASN == nil && c.BGPPeerPort == nil +} + +func (c LocalStorage) GetEnabled() bool { return getField(c.Enabled) } +func (c LocalStorage) GetLocalPath() string { return getField(c.LocalPath) } +func (c LocalStorage) GetReclaimPolicy() string { return getField(c.ReclaimPolicy) } +func (c LocalStorage) GetSetDefault() bool { return getField(c.SetDefault) } +func (c LocalStorage) Empty() bool { + return c.Enabled == nil && c.LocalPath == nil && c.ReclaimPolicy == nil && c.SetDefault == nil +} + +func (c MetricsServer) GetEnabled() bool { return getField(c.Enabled) } +func (c MetricsServer) Empty() bool { return c.Enabled == nil } diff --git a/src/k8s/pkg/k8sd/types/cluster_config_kubelet.go b/src/k8s/pkg/k8sd/types/cluster_config_kubelet.go new file mode 100644 index 000000000..72e4eba6a --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_kubelet.go @@ -0,0 +1,51 @@ +package types + +type Kubelet struct { + CloudProvider *string `json:"cloud-provider,omitempty"` + ClusterDNS *string `json:"cluster-dns,omitempty"` + ClusterDomain *string `json:"cluster-domain,omitempty"` +} + +func (c Kubelet) GetCloudProvider() string { return getField(c.CloudProvider) } +func (c Kubelet) GetClusterDNS() string { return getField(c.ClusterDNS) } +func (c Kubelet) GetClusterDomain() string { return getField(c.ClusterDomain) } +func (c Kubelet) Empty() bool { + return c.CloudProvider == nil && c.ClusterDNS == nil && c.ClusterDomain == nil +} + +// ToConfigMap converts a Kubelet config to a map[string]string to store in a Kubernetes configmap. +func (c Kubelet) ToConfigMap() (map[string]string, error) { + data := make(map[string]string) + + if v := c.CloudProvider; v != nil { + data["cloud-provider"] = *v + } + if v := c.ClusterDNS; v != nil { + data["cluster-dns"] = *v + } + if v := c.ClusterDomain; v != nil { + data["cluster-domain"] = *v + } + + return data, nil +} + +// KubeletFromConfigMap parses configmap data into a Kubelet config. 
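The pair of conversion functions around this point give the kubelet settings a ConfigMap representation. A short sketch of the round trip, matching the table test that follows below; both functions currently always return a nil error, with the signatures leaving room for stricter parsing later:

```go
// Sketch of the ConfigMap round trip: ToConfigMap flattens the kubelet
// settings into a map[string]string for storage in a Kubernetes ConfigMap,
// and KubeletFromConfigMap parses them back.
k := types.Kubelet{
	ClusterDNS:    vals.Pointer("10.152.183.10"),
	ClusterDomain: vals.Pointer("cluster.local"),
}

data, _ := k.ToConfigMap()
// data == map[string]string{"cluster-dns": "10.152.183.10", "cluster-domain": "cluster.local"}

parsed, _ := types.KubeletFromConfigMap(data)
// parsed is equal to k, as the table test below also verifies.
```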
+func KubeletFromConfigMap(m map[string]string) (Kubelet, error) { + var c Kubelet + if m == nil { + return c, nil + } + + if v, ok := m["cloud-provider"]; ok { + c.CloudProvider = &v + } + if v, ok := m["cluster-dns"]; ok { + c.ClusterDNS = &v + } + if v, ok := m["cluster-domain"]; ok { + c.ClusterDomain = &v + } + + return c, nil +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_kubelet_test.go b/src/k8s/pkg/k8sd/types/cluster_config_kubelet_test.go new file mode 100644 index 000000000..b74be4a28 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_kubelet_test.go @@ -0,0 +1,87 @@ +package types_test + +import ( + "testing" + + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/utils/vals" + . "github.com/onsi/gomega" +) + +func TestKubelet(t *testing.T) { + for _, tc := range []struct { + name string + kubelet types.Kubelet + configmap map[string]string + }{ + { + name: "Nil", + configmap: map[string]string{}, + }, + { + name: "Empty", + configmap: map[string]string{ + "cluster-dns": "", + "cluster-domain": "", + "cloud-provider": "", + }, + kubelet: types.Kubelet{ + ClusterDNS: vals.Pointer(""), + ClusterDomain: vals.Pointer(""), + CloudProvider: vals.Pointer(""), + }, + }, + { + name: "OnlyProvider", + configmap: map[string]string{ + "cloud-provider": "external", + }, + kubelet: types.Kubelet{ + CloudProvider: vals.Pointer("external"), + }, + }, + { + name: "OnlyDNS", + configmap: map[string]string{ + "cluster-dns": "1.1.1.1", + "cluster-domain": "cluster.local", + }, + kubelet: types.Kubelet{ + ClusterDNS: vals.Pointer("1.1.1.1"), + ClusterDomain: vals.Pointer("cluster.local"), + }, + }, + { + name: "All", + configmap: map[string]string{ + "cluster-dns": "1.1.1.1", + "cluster-domain": "cluster.local", + "cloud-provider": "external", + }, + kubelet: types.Kubelet{ + ClusterDNS: vals.Pointer("1.1.1.1"), + ClusterDomain: vals.Pointer("cluster.local"), + CloudProvider: vals.Pointer("external"), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + t.Run("ToConfigMap", func(t *testing.T) { + g := NewWithT(t) + + cm, err := tc.kubelet.ToConfigMap() + g.Expect(err).To(BeNil()) + g.Expect(cm).To(Equal(tc.configmap)) + }) + + t.Run("FromConfigMap", func(t *testing.T) { + g := NewWithT(t) + + k, err := types.KubeletFromConfigMap(tc.configmap) + g.Expect(err).To(BeNil()) + g.Expect(k).To(Equal(tc.kubelet)) + }) + }) + } + +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_merge.go b/src/k8s/pkg/k8sd/types/cluster_config_merge.go index 2bde66f6f..da203b7b0 100644 --- a/src/k8s/pkg/k8sd/types/cluster_config_merge.go +++ b/src/k8s/pkg/k8sd/types/cluster_config_merge.go @@ -2,118 +2,104 @@ package types import ( "fmt" - "slices" ) -func mergeValue[T comparable](old T, new T, allowChange bool) (T, error) { - var zeroValue T - if old != zeroValue && new != zeroValue && new != old && !allowChange { - return zeroValue, fmt.Errorf("value has changed") - } - if new != zeroValue { - return new, nil - } - return old, nil -} - -func mergeSlice[T comparable](old []T, new []T, allowChange bool) ([]T, error) { - if old != nil && new != nil && slices.Equal(old, new) && !allowChange { - return nil, fmt.Errorf("value has changed") - } - if new != nil { - return new, nil - } - return old, nil -} - // MergeClusterConfig applies updates from non-empty values of the new ClusterConfig to an existing one. // MergeClusterConfig will return an error if we try to update a config that must not be updated. once such an operation is implemented in the future, we can allow the change here. 
// MergeClusterConfig will create a new ClusterConfig object to avoid mutating the existing config objects. +// MergeClusterConfig will check that the new ClusterConfig is valid, and returns an error otherwise. func MergeClusterConfig(existing ClusterConfig, new ClusterConfig) (ClusterConfig, error) { var ( config ClusterConfig err error ) + // update string fields for _, i := range []struct { name string - val *string - old string - new string + val **string + old *string + new *string allowChange bool }{ - {name: "cluster CA certificate", val: &config.Certificates.CACert, old: existing.Certificates.CACert, new: new.Certificates.CACert}, - {name: "cluster CA key", val: &config.Certificates.CAKey, old: existing.Certificates.CAKey, new: new.Certificates.CAKey}, - {name: "k8s-dqlite certificate", val: &config.Certificates.K8sDqliteCert, old: existing.Certificates.K8sDqliteCert, new: new.Certificates.K8sDqliteCert}, - {name: "k8s-dqlite key", val: &config.Certificates.K8sDqliteKey, old: existing.Certificates.K8sDqliteKey, new: new.Certificates.K8sDqliteKey}, + // certificates + {name: "CA certificate", val: &config.Certificates.CACert, old: existing.Certificates.CACert, new: new.Certificates.CACert}, + {name: "CA key", val: &config.Certificates.CAKey, old: existing.Certificates.CAKey, new: new.Certificates.CAKey}, {name: "apiserver-kubelet-client certificate", val: &config.Certificates.APIServerKubeletClientCert, old: existing.Certificates.APIServerKubeletClientCert, new: new.Certificates.APIServerKubeletClientCert, allowChange: true}, {name: "apiserver-kubelet-client key", val: &config.Certificates.APIServerKubeletClientKey, old: existing.Certificates.APIServerKubeletClientKey, new: new.Certificates.APIServerKubeletClientKey, allowChange: true}, - {name: "front proxy CA certificate", val: &config.Certificates.FrontProxyCACert, old: existing.Certificates.FrontProxyCACert, new: new.Certificates.FrontProxyCACert, allowChange: true}, - {name: "front proxy CA key", val: &config.Certificates.FrontProxyCAKey, old: existing.Certificates.FrontProxyCAKey, new: new.Certificates.FrontProxyCAKey, allowChange: true}, - {name: "datastore ca", val: &config.Certificates.DatastoreCACert, old: existing.Certificates.DatastoreCACert, new: new.Certificates.DatastoreCACert, allowChange: true}, - {name: "datastore client certificate", val: &config.Certificates.DatastoreClientCert, old: existing.Certificates.DatastoreClientCert, new: new.Certificates.DatastoreClientCert, allowChange: true}, - {name: "datastore client key", val: &config.Certificates.DatastoreClientKey, old: existing.Certificates.DatastoreClientKey, new: new.Certificates.DatastoreClientKey, allowChange: true}, - {name: "authorization-mode", val: &config.APIServer.AuthorizationMode, old: existing.APIServer.AuthorizationMode, new: new.APIServer.AuthorizationMode, allowChange: true}, - {name: "service account key", val: &config.APIServer.ServiceAccountKey, old: existing.APIServer.ServiceAccountKey, new: new.APIServer.ServiceAccountKey}, - {name: "pod cidr", val: &config.Network.PodCIDR, old: existing.Network.PodCIDR, new: new.Network.PodCIDR}, - {name: "service cidr", val: &config.Network.ServiceCIDR, old: existing.Network.ServiceCIDR, new: new.Network.ServiceCIDR}, - {name: "datastore", val: &config.APIServer.Datastore, old: existing.APIServer.Datastore, new: new.APIServer.Datastore}, - {name: "datastore url", val: &config.APIServer.DatastoreURL, old: existing.APIServer.DatastoreURL, new: new.APIServer.DatastoreURL, allowChange: true}, - {name: "cluster 
dns", val: &config.Kubelet.ClusterDNS, old: existing.Kubelet.ClusterDNS, new: new.Kubelet.ClusterDNS, allowChange: true}, - {name: "cluster domain", val: &config.Kubelet.ClusterDomain, old: existing.Kubelet.ClusterDomain, new: new.Kubelet.ClusterDomain, allowChange: true}, - {name: "cloud provider", val: &config.Kubelet.CloudProvider, old: existing.Kubelet.CloudProvider, new: new.Kubelet.CloudProvider, allowChange: true}, - - {name: "ingress.default-tls-secret", val: &config.Ingress.DefaultTLSSecret, old: existing.Ingress.DefaultTLSSecret, new: new.Ingress.DefaultTLSSecret, allowChange: true}, - - {name: "load-balancer.bgp-peer-address", val: &config.LoadBalancer.BGPPeerAddress, old: existing.LoadBalancer.BGPPeerAddress, new: new.LoadBalancer.BGPPeerAddress, allowChange: true}, - - {name: "local-storage.local-path", val: &config.LocalStorage.LocalPath, old: existing.LocalStorage.LocalPath, new: new.LocalStorage.LocalPath, allowChange: true}, - {name: "local-storage.set-default", val: &config.LocalStorage.ReclaimPolicy, old: existing.LocalStorage.ReclaimPolicy, new: new.LocalStorage.ReclaimPolicy, allowChange: true}, + {name: "front proxy CA certificate", val: &config.Certificates.FrontProxyCACert, old: existing.Certificates.FrontProxyCACert, new: new.Certificates.FrontProxyCACert}, + {name: "front proxy CA key", val: &config.Certificates.FrontProxyCAKey, old: existing.Certificates.FrontProxyCAKey, new: new.Certificates.FrontProxyCAKey}, + {name: "service account key", val: &config.Certificates.ServiceAccountKey, old: existing.Certificates.ServiceAccountKey, new: new.Certificates.ServiceAccountKey}, + // datastore + {name: "datastore type", val: &config.Datastore.Type, old: existing.Datastore.Type, new: new.Datastore.Type}, + {name: "k8s-dqlite certificate", val: &config.Datastore.K8sDqliteCert, old: existing.Datastore.K8sDqliteCert, new: new.Datastore.K8sDqliteCert}, + {name: "k8s-dqlite key", val: &config.Datastore.K8sDqliteKey, old: existing.Datastore.K8sDqliteKey, new: new.Datastore.K8sDqliteKey}, + {name: "external datastore URL", val: &config.Datastore.ExternalURL, old: existing.Datastore.ExternalURL, new: new.Datastore.ExternalURL, allowChange: true}, + {name: "external datastore CA certificate", val: &config.Datastore.ExternalCACert, old: existing.Datastore.ExternalCACert, new: new.Datastore.ExternalCACert, allowChange: true}, + {name: "external datastore client certificate", val: &config.Datastore.ExternalClientCert, old: existing.Datastore.ExternalClientCert, new: new.Datastore.ExternalClientCert, allowChange: true}, + {name: "external datastore client key", val: &config.Datastore.ExternalClientKey, old: existing.Datastore.ExternalClientKey, new: new.Datastore.ExternalClientKey, allowChange: true}, + // network + {name: "pod CIDR", val: &config.Network.PodCIDR, old: existing.Network.PodCIDR, new: new.Network.PodCIDR}, + {name: "service CIDR", val: &config.Network.ServiceCIDR, old: existing.Network.ServiceCIDR, new: new.Network.ServiceCIDR}, + // apiserver + {name: "kube-apiserver authorization mode", val: &config.APIServer.AuthorizationMode, old: existing.APIServer.AuthorizationMode, new: new.APIServer.AuthorizationMode, allowChange: true}, + // kubelet + {name: "kubelet cluster DNS", val: &config.Kubelet.ClusterDNS, old: existing.Kubelet.ClusterDNS, new: new.Kubelet.ClusterDNS, allowChange: true}, + {name: "kubelet cluster domain", val: &config.Kubelet.ClusterDomain, old: existing.Kubelet.ClusterDomain, new: new.Kubelet.ClusterDomain, allowChange: true}, + {name: "kubelet cloud 
provider", val: &config.Kubelet.CloudProvider, old: existing.Kubelet.CloudProvider, new: new.Kubelet.CloudProvider, allowChange: true}, + // ingress + {name: "ingress default TLS secret", val: &config.Ingress.DefaultTLSSecret, old: existing.Ingress.DefaultTLSSecret, new: new.Ingress.DefaultTLSSecret, allowChange: true}, + // load balancer + {name: "load balancer BGP peer address", val: &config.LoadBalancer.BGPPeerAddress, old: existing.LoadBalancer.BGPPeerAddress, new: new.LoadBalancer.BGPPeerAddress, allowChange: true}, + // local storage + {name: "local storage path", val: &config.LocalStorage.LocalPath, old: existing.LocalStorage.LocalPath, new: new.LocalStorage.LocalPath, allowChange: !existing.LocalStorage.GetEnabled() || !new.LocalStorage.GetEnabled()}, + {name: "local storage reclaim policy", val: &config.LocalStorage.ReclaimPolicy, old: existing.LocalStorage.ReclaimPolicy, new: new.LocalStorage.ReclaimPolicy, allowChange: !existing.LocalStorage.GetEnabled() || !new.LocalStorage.GetEnabled()}, } { - *i.val, err = mergeValue(i.old, i.new, i.allowChange) - if err != nil { + if *i.val, err = mergeField(i.old, i.new, i.allowChange); err != nil { return ClusterConfig{}, fmt.Errorf("prevented update of %s: %w", i.name, err) } } + // update string slice fields for _, i := range []struct { name string - val *int - old int - new int + val **[]string + old *[]string + new *[]string allowChange bool }{ - {name: "secure port", val: &config.APIServer.SecurePort, old: existing.APIServer.SecurePort, new: new.APIServer.SecurePort}, - {name: "k8s-dqlite port", val: &config.K8sDqlite.Port, old: existing.K8sDqlite.Port, new: new.K8sDqlite.Port}, - - {name: "load-balancer.bgp-local-asn", val: &config.LoadBalancer.BGPLocalASN, old: existing.LoadBalancer.BGPLocalASN, new: new.LoadBalancer.BGPLocalASN, allowChange: true}, - {name: "load-balancer.bgp-peer-asn", val: &config.LoadBalancer.BGPPeerASN, old: existing.LoadBalancer.BGPPeerASN, new: new.LoadBalancer.BGPPeerASN, allowChange: true}, - {name: "load-balancer.bgp-peer-port", val: &config.LoadBalancer.BGPPeerPort, old: existing.LoadBalancer.BGPPeerPort, new: new.LoadBalancer.BGPPeerPort, allowChange: true}, + {name: "DNS upstream nameservers", val: &config.DNS.UpstreamNameservers, old: existing.DNS.UpstreamNameservers, new: new.DNS.UpstreamNameservers, allowChange: true}, + {name: "load balancer CIDRs", val: &config.LoadBalancer.CIDRs, old: existing.LoadBalancer.CIDRs, new: new.LoadBalancer.CIDRs, allowChange: true}, + {name: "load balancer L2 interfaces", val: &config.LoadBalancer.L2Interfaces, old: existing.LoadBalancer.L2Interfaces, new: new.LoadBalancer.L2Interfaces, allowChange: true}, } { - *i.val, err = mergeValue(i.old, i.new, i.allowChange) - if err != nil { + if *i.val, err = mergeSliceField(i.old, i.new, i.allowChange); err != nil { return ClusterConfig{}, fmt.Errorf("prevented update of %s: %w", i.name, err) } } + // update int fields for _, i := range []struct { name string - val *[]string - old []string - new []string + val **int + old *int + new *int allowChange bool }{ - {name: "dns.upstream-nameservers", val: &config.DNS.UpstreamNameservers, old: existing.DNS.UpstreamNameservers, new: new.DNS.UpstreamNameservers, allowChange: true}, - - {name: "load-balancer.cidrs", val: &config.LoadBalancer.CIDRs, old: existing.LoadBalancer.CIDRs, new: new.LoadBalancer.CIDRs, allowChange: true}, - {name: "load-balancer.l2-interfaces", val: &config.LoadBalancer.L2Interfaces, old: existing.LoadBalancer.L2Interfaces, new: new.LoadBalancer.L2Interfaces, 
allowChange: true}, + // apiserver + {name: "kube-apiserver secure port", val: &config.APIServer.SecurePort, old: existing.APIServer.SecurePort, new: new.APIServer.SecurePort}, + // datastore + {name: "k8s-dqlite port", val: &config.Datastore.K8sDqlitePort, old: existing.Datastore.K8sDqlitePort, new: new.Datastore.K8sDqlitePort}, + // load-balancer + {name: "load balancer BGP local ASN", val: &config.LoadBalancer.BGPLocalASN, old: existing.LoadBalancer.BGPLocalASN, new: new.LoadBalancer.BGPLocalASN, allowChange: true}, + {name: "load balancer BGP peer ASN", val: &config.LoadBalancer.BGPPeerASN, old: existing.LoadBalancer.BGPPeerASN, new: new.LoadBalancer.BGPPeerASN, allowChange: true}, + {name: "load balancer BGP peer port", val: &config.LoadBalancer.BGPPeerPort, old: existing.LoadBalancer.BGPPeerPort, new: new.LoadBalancer.BGPPeerPort, allowChange: true}, } { - *i.val, err = mergeSlice(i.old, i.new, i.allowChange) - if err != nil { + if *i.val, err = mergeField(i.old, i.new, i.allowChange); err != nil { return ClusterConfig{}, fmt.Errorf("prevented update of %s: %w", i.name, err) } } + // update bool fields for _, i := range []struct { name string val **bool @@ -121,26 +107,33 @@ func MergeClusterConfig(existing ClusterConfig, new ClusterConfig) (ClusterConfi new *bool allowChange bool }{ - {name: "network.enabled", val: &config.Network.Enabled, old: existing.Network.Enabled, new: new.Network.Enabled, allowChange: true}, - {name: "dns.enabled", val: &config.DNS.Enabled, old: existing.DNS.Enabled, new: new.DNS.Enabled, allowChange: true}, - {name: "gateway.enabled", val: &config.Gateway.Enabled, old: existing.Gateway.Enabled, new: new.Gateway.Enabled, allowChange: true}, - {name: "ingress.enabled", val: &config.Ingress.Enabled, old: existing.Ingress.Enabled, new: new.Ingress.Enabled, allowChange: true}, - {name: "load-balancer.enabled", val: &config.LoadBalancer.Enabled, old: existing.LoadBalancer.Enabled, new: new.LoadBalancer.Enabled, allowChange: true}, - {name: "local-storage.enabled", val: &config.LocalStorage.Enabled, old: existing.LocalStorage.Enabled, new: new.LocalStorage.Enabled, allowChange: true}, - {name: "metrics-server.enabled", val: &config.MetricsServer.Enabled, old: existing.MetricsServer.Enabled, new: new.MetricsServer.Enabled, allowChange: true}, - - {name: "ingress.enable-proxy-protocol", val: &config.Ingress.EnableProxyProtocol, old: existing.Ingress.EnableProxyProtocol, new: new.Ingress.EnableProxyProtocol, allowChange: true}, - - {name: "load-balancer.l2-mode", val: &config.LoadBalancer.L2Enabled, old: existing.LoadBalancer.L2Enabled, new: new.LoadBalancer.L2Enabled, allowChange: true}, - {name: "load-balancer.bgp-mode", val: &config.LoadBalancer.BGPEnabled, old: existing.LoadBalancer.BGPEnabled, new: new.LoadBalancer.BGPEnabled, allowChange: true}, - - {name: "local-storage.set-default", val: &config.LocalStorage.SetDefault, old: existing.LocalStorage.SetDefault, new: new.LocalStorage.SetDefault, allowChange: true}, + // network + {name: "network enabled", val: &config.Network.Enabled, old: existing.Network.Enabled, new: new.Network.Enabled, allowChange: true}, + // DNS + {name: "DNS enabled", val: &config.DNS.Enabled, old: existing.DNS.Enabled, new: new.DNS.Enabled, allowChange: true}, + // gateway + {name: "gateway enabled", val: &config.Gateway.Enabled, old: existing.Gateway.Enabled, new: new.Gateway.Enabled, allowChange: true}, + // ingress + {name: "ingress enabled", val: &config.Ingress.Enabled, old: existing.Ingress.Enabled, new: new.Ingress.Enabled, 
allowChange: true}, + {name: "ingress enable proxy protocol", val: &config.Ingress.EnableProxyProtocol, old: existing.Ingress.EnableProxyProtocol, new: new.Ingress.EnableProxyProtocol, allowChange: true}, + // load-balancer + {name: "load balancer enabled", val: &config.LoadBalancer.Enabled, old: existing.LoadBalancer.Enabled, new: new.LoadBalancer.Enabled, allowChange: true}, + {name: "load balancer L2 mode", val: &config.LoadBalancer.L2Mode, old: existing.LoadBalancer.L2Mode, new: new.LoadBalancer.L2Mode, allowChange: true}, + {name: "load balancer BGP mode", val: &config.LoadBalancer.BGPMode, old: existing.LoadBalancer.BGPMode, new: new.LoadBalancer.BGPMode, allowChange: true}, + // local-storage + {name: "local storage enabled", val: &config.LocalStorage.Enabled, old: existing.LocalStorage.Enabled, new: new.LocalStorage.Enabled, allowChange: true}, + {name: "local storage set default", val: &config.LocalStorage.SetDefault, old: existing.LocalStorage.SetDefault, new: new.LocalStorage.SetDefault, allowChange: true}, + // metrics-server + {name: "metrics server enabled", val: &config.MetricsServer.Enabled, old: existing.MetricsServer.Enabled, new: new.MetricsServer.Enabled, allowChange: true}, } { - *i.val, err = mergeValue(i.old, i.new, i.allowChange) - if err != nil { + if *i.val, err = mergeField(i.old, i.new, i.allowChange); err != nil { return ClusterConfig{}, fmt.Errorf("prevented update of %s: %w", i.name, err) } } + if err := config.Validate(); err != nil { + return ClusterConfig{}, fmt.Errorf("updated cluster configuration is not valid: %w", err) + } + return config, nil } diff --git a/src/k8s/pkg/k8sd/types/cluster_config_merge_test.go b/src/k8s/pkg/k8sd/types/cluster_config_merge_test.go index 004c9dcf3..1fc932d7d 100644 --- a/src/k8s/pkg/k8sd/types/cluster_config_merge_test.go +++ b/src/k8s/pkg/k8sd/types/cluster_config_merge_test.go @@ -1,63 +1,340 @@ -package types +package types_test import ( + "fmt" "testing" + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/utils/vals" . 
"github.com/onsi/gomega" ) -func Test_mergeValue(t *testing.T) { - t.Run("string", func(t *testing.T) { - for _, tc := range []struct { - name string - old string - new string - allowChange bool - expectErr bool - expectVal string - }{ - {name: "set-empty", new: "val", expectVal: "val"}, - {name: "keep-old", old: "val", expectVal: "val"}, - {name: "update", old: "val", new: "newVal", allowChange: true, expectVal: "newVal"}, - {name: "update-not-allowed", old: "val", new: "newVal", expectErr: true}, - } { - t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) - result, err := mergeValue(tc.old, tc.new, tc.allowChange) - if tc.expectErr { - g.Expect(err).ToNot(BeNil()) - } else { - g.Expect(err).To(BeNil()) - g.Expect(result).To(Equal(tc.expectVal)) - } - }) - } - }) - - t.Run("int", func(t *testing.T) { - for _, tc := range []struct { - name string - old int - new int - allowChange bool - expectErr bool - expectVal int - }{ - {name: "set-empty", new: 100, expectVal: 100}, - {name: "keep-old", old: 100, expectVal: 100}, - {name: "update", old: 100, new: 200, allowChange: true, expectVal: 200}, - {name: "update-not-allowed", old: 100, new: 200, expectErr: true}, - } { +type mergeClusterConfigTestCase struct { + name string + old types.ClusterConfig + new types.ClusterConfig + expectResult types.ClusterConfig + expectErr bool +} + +func generateMergeClusterConfigTestCases[T any](field string, changeAllowed bool, val1 T, val2 T, update func(*types.ClusterConfig, any)) []mergeClusterConfigTestCase { + var cfgNil, cfgZero, cfgOne, cfgTwo types.ClusterConfig + var zero T + + // defaults for validation + for _, cfg := range []*types.ClusterConfig{&cfgNil, &cfgZero, &cfgOne, &cfgTwo} { + cfg.Network.PodCIDR = vals.Pointer("10.1.0.0/16") + cfg.Network.ServiceCIDR = vals.Pointer("10.152.183.0/24") + } + + update(&cfgZero, zero) + update(&cfgOne, val1) + update(&cfgTwo, val2) + + return []mergeClusterConfigTestCase{ + { + name: fmt.Sprintf("%s/Empty", field), + old: cfgNil, + new: cfgNil, + expectResult: cfgNil, + }, + { + name: fmt.Sprintf("%s/Set", field), + new: cfgOne, + expectResult: cfgOne, + expectErr: false, + }, + { + name: fmt.Sprintf("%s/Keep", field), + old: cfgOne, + new: cfgNil, + expectResult: cfgOne, + }, + { + name: fmt.Sprintf("%s/Update", field), + old: cfgOne, + new: cfgTwo, + expectResult: cfgTwo, + expectErr: !changeAllowed, + }, + { + name: fmt.Sprintf("%s/Unset", field), + old: cfgOne, + new: cfgZero, + expectResult: cfgZero, + expectErr: !changeAllowed, + }, + } +} + +func TestMergeClusterConfig(t *testing.T) { + for _, tcs := range [][]mergeClusterConfigTestCase{ + generateMergeClusterConfigTestCases("Certificates/CACert", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.CACert = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Certificates/CAKey", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.CAKey = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Certificates/FrontProxyCACert", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.FrontProxyCACert = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Certificates/FrontProxyCAKey", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.FrontProxyCAKey = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Certificates/ServiceAccountKey", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.ServiceAccountKey = vals.Pointer(v.(string)) }), + 
generateMergeClusterConfigTestCases("Certificates/APIServerKubeletClientCert", true, "v1", "v2", func(c *types.ClusterConfig, v any) { + c.Certificates.APIServerKubeletClientCert = vals.Pointer(v.(string)) + }), + generateMergeClusterConfigTestCases("Certificates/APIServerKubeletClientKey", true, "v1", "v2", func(c *types.ClusterConfig, v any) { + c.Certificates.APIServerKubeletClientKey = vals.Pointer(v.(string)) + }), + generateMergeClusterConfigTestCases("Datastore/Type", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Datastore.Type = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Datastore/K8sDqliteCert", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Datastore.K8sDqliteCert = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Datastore/K8sDqliteKey", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Datastore.K8sDqliteKey = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Datastore/K8sDqlitePort", false, 6443, 16443, func(c *types.ClusterConfig, v any) { c.Datastore.K8sDqlitePort = vals.Pointer(v.(int)) }), + generateMergeClusterConfigTestCases("Datastore/ExternalURL", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Datastore.ExternalURL = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Datastore/ExternalCACert", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Datastore.ExternalCACert = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Datastore/ExternalClientCert", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Datastore.ExternalClientCert = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Datastore/ExternalClientKey", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Datastore.ExternalClientKey = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Network/Enable", true, true, false, func(c *types.ClusterConfig, v any) { c.Network.Enabled = vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("Network/Disable", true, false, true, func(c *types.ClusterConfig, v any) { c.Network.Enabled = vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("Network/PodCIDR", false, "10.1.0.0/16", "10.2.0.0/16", func(c *types.ClusterConfig, v any) { c.Network.PodCIDR = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Network/ServiceCIDR", false, "10.152.183.0/24", "10.152.184.0/24", func(c *types.ClusterConfig, v any) { c.Network.ServiceCIDR = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("APIServer/SecurePort", false, 6443, 16443, func(c *types.ClusterConfig, v any) { c.APIServer.SecurePort = vals.Pointer(v.(int)) }), + generateMergeClusterConfigTestCases("APIServer/AuthorizationMode", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.APIServer.AuthorizationMode = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Kubelet/CloudProvider", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Kubelet.CloudProvider = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Kubelet/ClusterDNS", true, "1.1.1.1", "2.2.2.2", func(c *types.ClusterConfig, v any) { c.Kubelet.ClusterDNS = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Kubelet/ClusterDomain", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Kubelet.ClusterDomain = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("DNS/Enable", true, true, false, func(c *types.ClusterConfig, v any) { c.DNS.Enabled = 
vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("DNS/Disable", true, false, true, func(c *types.ClusterConfig, v any) { c.DNS.Enabled = vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("DNS/UpstreamNameservers", true, []string{"c1"}, []string{"c2"}, func(c *types.ClusterConfig, v any) { + c.DNS.UpstreamNameservers = vals.Pointer(v.([]string)) + }), + generateMergeClusterConfigTestCases("Ingress/Enable", true, false, true, func(c *types.ClusterConfig, v any) { + c.Network.Enabled = vals.Pointer(true) + c.Ingress.Enabled = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("Ingress/Disable", true, true, false, func(c *types.ClusterConfig, v any) { + c.Network.Enabled = vals.Pointer(true) + c.Ingress.Enabled = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("Ingress/DefaultTLSSecret", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Ingress.DefaultTLSSecret = vals.Pointer(v.(string)) }), + generateMergeClusterConfigTestCases("Ingress/EnableProxyProtocol/Enable", true, true, false, func(c *types.ClusterConfig, v any) { + c.Ingress.EnableProxyProtocol = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("Ingress/EnableProxyProtocol/Disable", true, false, true, func(c *types.ClusterConfig, v any) { + c.Ingress.EnableProxyProtocol = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("Gateway/Enable", true, true, false, func(c *types.ClusterConfig, v any) { + c.Network.Enabled = vals.Pointer(true) + c.Gateway.Enabled = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("Gateway/Disable", true, false, true, func(c *types.ClusterConfig, v any) { + c.Network.Enabled = vals.Pointer(true) + c.Gateway.Enabled = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("LoadBalancer/Enable", true, true, false, func(c *types.ClusterConfig, v any) { + c.Network.Enabled = vals.Pointer(true) + c.LoadBalancer.Enabled = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("LoadBalancer/Disable", true, false, true, func(c *types.ClusterConfig, v any) { + c.Network.Enabled = vals.Pointer(true) + c.LoadBalancer.Enabled = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("LoadBalancer/CIDRs", true, []string{"172.16.101.0/24"}, []string{"172.16.100.0/24"}, func(c *types.ClusterConfig, v any) { + c.LoadBalancer.CIDRs = vals.Pointer(v.([]string)) + }), + generateMergeClusterConfigTestCases("LoadBalancer/L2Mode/Enable", true, true, false, func(c *types.ClusterConfig, v any) { c.LoadBalancer.L2Mode = vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("LoadBalancer/L2Mode/Disable", true, false, true, func(c *types.ClusterConfig, v any) { c.LoadBalancer.L2Mode = vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("LoadBalancer/L2Interfaces", true, []string{"c1"}, []string{"c2"}, func(c *types.ClusterConfig, v any) { + c.LoadBalancer.L2Interfaces = vals.Pointer(v.([]string)) + }), + generateMergeClusterConfigTestCases("LoadBalancer/BGPMode/Enable", true, true, false, func(c *types.ClusterConfig, v any) { + c.LoadBalancer.BGPMode = vals.Pointer(v.(bool)) + c.LoadBalancer.BGPLocalASN = vals.Pointer(100) + c.LoadBalancer.BGPPeerAddress = vals.Pointer("10.10.0.0/16") + c.LoadBalancer.BGPPeerASN = vals.Pointer(101) + c.LoadBalancer.BGPPeerPort = vals.Pointer(10010) + }), + generateMergeClusterConfigTestCases("LoadBalancer/BGPMode/Disable", true, false, true, func(c *types.ClusterConfig, v any) { + c.LoadBalancer.BGPMode = 
vals.Pointer(v.(bool)) + c.LoadBalancer.BGPLocalASN = vals.Pointer(100) + c.LoadBalancer.BGPPeerAddress = vals.Pointer("10.10.0.0/16") + c.LoadBalancer.BGPPeerASN = vals.Pointer(101) + c.LoadBalancer.BGPPeerPort = vals.Pointer(10010) + }), + generateMergeClusterConfigTestCases("LoadBalancer/BGPLocalASN", true, 6443, 16443, func(c *types.ClusterConfig, v any) { c.LoadBalancer.BGPLocalASN = vals.Pointer(v.(int)) }), + generateMergeClusterConfigTestCases("LoadBalancer/BGPPeerAddress", true, "a1", "a2", func(c *types.ClusterConfig, v any) { + c.LoadBalancer.BGPPeerAddress = vals.Pointer(v.(string)) + }), + generateMergeClusterConfigTestCases("LoadBalancer/BGPPeerASN", true, 6443, 16443, func(c *types.ClusterConfig, v any) { c.LoadBalancer.BGPPeerASN = vals.Pointer(v.(int)) }), + generateMergeClusterConfigTestCases("LoadBalancer/BGPPeerPort", true, 6443, 16443, func(c *types.ClusterConfig, v any) { c.LoadBalancer.BGPPeerPort = vals.Pointer(v.(int)) }), + generateMergeClusterConfigTestCases("LocalStorage/Enable", true, true, false, func(c *types.ClusterConfig, v any) { + c.LocalStorage.LocalPath = vals.Pointer("path") + c.LocalStorage.Enabled = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("LocalStorage/Disable", true, false, true, func(c *types.ClusterConfig, v any) { + c.LocalStorage.LocalPath = vals.Pointer("path") + c.LocalStorage.Enabled = vals.Pointer(v.(bool)) + }), + generateMergeClusterConfigTestCases("LocalStorage/LocalPath/AllowChange", true, "a1", "a2", func(c *types.ClusterConfig, v any) { + c.LocalStorage.LocalPath = vals.Pointer(v.(string)) + }), + generateMergeClusterConfigTestCases("LocalStorage/LocalPath/PreventChange", false, "a1", "a2", func(c *types.ClusterConfig, v any) { + c.LocalStorage.Enabled = vals.Pointer(true) + c.LocalStorage.LocalPath = vals.Pointer(v.(string)) + }), + generateMergeClusterConfigTestCases("LocalStorage/ReclaimPolicy/AllowChange", true, "Retain", "Delete", func(c *types.ClusterConfig, v any) { + c.LocalStorage.ReclaimPolicy = vals.Pointer(v.(string)) + }), + generateMergeClusterConfigTestCases("LocalStorage/ReclaimPolicy/PreventChange", false, "Retain", "Delete", func(c *types.ClusterConfig, v any) { + c.LocalStorage.Enabled = vals.Pointer(true) + c.LocalStorage.LocalPath = vals.Pointer("path") + c.LocalStorage.ReclaimPolicy = vals.Pointer(v.(string)) + }), + generateMergeClusterConfigTestCases("LocalStorage/SetDefault/Enable", true, true, false, func(c *types.ClusterConfig, v any) { c.LocalStorage.SetDefault = vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("LocalStorage/SetDefault/Disable", true, false, true, func(c *types.ClusterConfig, v any) { c.LocalStorage.SetDefault = vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("MetricsServer/Enable", true, true, false, func(c *types.ClusterConfig, v any) { c.MetricsServer.Enabled = vals.Pointer(v.(bool)) }), + generateMergeClusterConfigTestCases("MetricsServer/Disable", true, false, true, func(c *types.ClusterConfig, v any) { c.MetricsServer.Enabled = vals.Pointer(v.(bool)) }), + } { + for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - result, err := mergeValue(tc.old, tc.new, tc.allowChange) + + result, err := types.MergeClusterConfig(tc.old, tc.new) if tc.expectErr { g.Expect(err).ToNot(BeNil()) } else { g.Expect(err).To(BeNil()) - g.Expect(result).To(Equal(tc.expectVal)) + g.Expect(result).To(Equal(tc.expectResult)) } }) } - }) + } +} + +func TestMergeClusterConfig_Scenarios(t *testing.T) { + for _, tc := range 
[]struct { + name string + old types.ClusterConfig + new types.ClusterConfig + expectMerged types.ClusterConfig + expectErr bool + }{ + { + name: "LoadBalancer/NeedNetwork", + old: types.ClusterConfig{ + Network: types.Network{Enabled: vals.Pointer(true)}, + LoadBalancer: types.LoadBalancer{Enabled: vals.Pointer(true)}, + }, + new: types.ClusterConfig{ + Network: types.Network{Enabled: vals.Pointer(false)}, + }, + expectErr: true, + }, + { + name: "LoadBalancer/DisableWithNetwork", + old: types.ClusterConfig{ + Network: types.Network{Enabled: vals.Pointer(true)}, + LoadBalancer: types.LoadBalancer{Enabled: vals.Pointer(true)}, + }, + new: types.ClusterConfig{ + Network: types.Network{Enabled: vals.Pointer(false)}, + LoadBalancer: types.LoadBalancer{Enabled: vals.Pointer(false)}, + }, + expectMerged: types.ClusterConfig{ + Network: types.Network{Enabled: vals.Pointer(false)}, + LoadBalancer: types.LoadBalancer{Enabled: vals.Pointer(false)}, + }, + }, + { + name: "LoadBalancer/MissingBGP", + old: types.ClusterConfig{ + Network: types.Network{Enabled: vals.Pointer(true)}, + LoadBalancer: types.LoadBalancer{Enabled: vals.Pointer(true)}, + }, + new: types.ClusterConfig{ + LoadBalancer: types.LoadBalancer{BGPMode: vals.Pointer(true)}, + }, + expectErr: true, + }, + { + name: "LoadBalancer/InvalidCIDR", + new: types.ClusterConfig{ + LoadBalancer: types.LoadBalancer{ + CIDRs: vals.Pointer([]string{"not-a-cidr"}), + }, + }, + expectErr: true, + }, + { + name: "LocalStorage/InvalidReclaimPolicy", + new: types.ClusterConfig{ + LocalStorage: types.LocalStorage{ + ReclaimPolicy: vals.Pointer("Invalid"), + }, + }, + expectErr: true, + }, + { + name: "LocalStorage/EnableAndSetPath", + old: types.ClusterConfig{ + LocalStorage: types.LocalStorage{ + LocalPath: vals.Pointer("oldpath"), + }, + }, + new: types.ClusterConfig{ + LocalStorage: types.LocalStorage{ + Enabled: vals.Pointer(true), + LocalPath: vals.Pointer("path"), + }, + }, + expectMerged: types.ClusterConfig{ + LocalStorage: types.LocalStorage{ + Enabled: vals.Pointer(true), + LocalPath: vals.Pointer("path"), + }, + }, + }, + { + name: "LocalStorage/EnableAndSetReclaimPolicy", + old: types.ClusterConfig{ + LocalStorage: types.LocalStorage{ + LocalPath: vals.Pointer("path"), + ReclaimPolicy: vals.Pointer("Delete"), + }, + }, + new: types.ClusterConfig{ + LocalStorage: types.LocalStorage{ + Enabled: vals.Pointer(true), + ReclaimPolicy: vals.Pointer("Retain"), + }, + }, + expectMerged: types.ClusterConfig{ + LocalStorage: types.LocalStorage{ + Enabled: vals.Pointer(true), + LocalPath: vals.Pointer("path"), + ReclaimPolicy: vals.Pointer("Retain"), + }, + }, + }, + { + name: "LocalStorage/RequirePath", + new: types.ClusterConfig{ + LocalStorage: types.LocalStorage{ + Enabled: vals.Pointer(true), + LocalPath: vals.Pointer(""), + }, + }, + expectErr: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + // defaults for validation + tc.old.SetDefaults() + tc.expectMerged.SetDefaults() + + merged, err := types.MergeClusterConfig(tc.old, tc.new) + if tc.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).To(BeNil()) + g.Expect(merged).To(Equal(tc.expectMerged)) + } + }) + } } diff --git a/src/k8s/pkg/k8sd/types/cluster_config_network.go b/src/k8s/pkg/k8sd/types/cluster_config_network.go new file mode 100644 index 000000000..6cf96f722 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_network.go @@ -0,0 +1,12 @@ +package types + +type Network struct { + Enabled *bool `json:"enabled,omitempty"` + PodCIDR 
*string `json:"pod-cidr,omitempty"` + ServiceCIDR *string `json:"service-cidr,omitempty"` +} + +func (c Network) GetEnabled() bool { return getField(c.Enabled) } +func (c Network) GetPodCIDR() string { return getField(c.PodCIDR) } +func (c Network) GetServiceCIDR() string { return getField(c.ServiceCIDR) } +func (c Network) Empty() bool { return c.PodCIDR == nil && c.ServiceCIDR == nil } diff --git a/src/k8s/pkg/k8sd/types/cluster_config_test.go b/src/k8s/pkg/k8sd/types/cluster_config_test.go index 5e7814749..06e21a8cb 100644 --- a/src/k8s/pkg/k8sd/types/cluster_config_test.go +++ b/src/k8s/pkg/k8sd/types/cluster_config_test.go @@ -1,228 +1,14 @@ package types_test import ( - "fmt" "testing" - apiv1 "github.com/canonical/k8s/api/v1" "github.com/canonical/k8s/pkg/k8sd/types" - "github.com/canonical/k8s/pkg/utils/vals" . "github.com/onsi/gomega" ) -func TestClusterConfigFromBootstrapConfig(t *testing.T) { +func TestClusterConfigEmpty(t *testing.T) { g := NewWithT(t) - bootstrapConfig := apiv1.BootstrapConfig{ - ClusterCIDR: "10.1.0.0/16", - ServiceCIDR: "10.152.183.0/24", - Components: []string{"dns", "network"}, - EnableRBAC: vals.Pointer(true), - K8sDqlitePort: 12345, - } - expectedConfig := types.ClusterConfig{ - APIServer: types.APIServer{ - AuthorizationMode: "Node,RBAC", - }, - Network: types.Network{ - Enabled: vals.Pointer(true), - PodCIDR: "10.1.0.0/16", - ServiceCIDR: "10.152.183.0/24", - }, - K8sDqlite: types.K8sDqlite{ - Port: 12345, - }, - DNS: types.DNS{ - Enabled: vals.Pointer(true), - }, - } - - g.Expect(types.ClusterConfigFromBootstrapConfig(&bootstrapConfig)).To(Equal(expectedConfig)) -} - -func TestValidateCIDR(t *testing.T) { - g := NewWithT(t) - // Create a new BootstrapConfig with default values - validConfig := types.ClusterConfig{ - Network: types.Network{ - PodCIDR: "10.1.0.0/16,2001:0db8::/32", - ServiceCIDR: "10.152.183.0/16", - }, - } - err := validConfig.Validate() - g.Expect(err).To(BeNil()) - - t.Run("InvalidCIDR", func(t *testing.T) { - for _, tc := range []struct { - cidr string - }{ - {cidr: "bananas"}, - {cidr: "fd01::/64,fd02::/64,fd03::/64"}, - } { - t.Run(tc.cidr, func(t *testing.T) { - t.Run("Pod", func(t *testing.T) { - g := NewWithT(t) - config := types.ClusterConfig{ - Network: types.Network{ - PodCIDR: tc.cidr, - }, - } - err := config.Validate() - g.Expect(err).ToNot(BeNil()) - }) - t.Run("Service", func(t *testing.T) { - g := NewWithT(t) - config := types.ClusterConfig{ - Network: types.Network{ - ServiceCIDR: tc.cidr, - }, - } - err := config.Validate() - g.Expect(err).ToNot(BeNil()) - }) - }) - } - }) -} - -func TestUnsetRBAC(t *testing.T) { - g := NewWithT(t) - // Ensure unset rbac yields rbac authz - bootstrapConfig := apiv1.BootstrapConfig{ - EnableRBAC: nil, - } - expectedConfig := types.ClusterConfig{ - APIServer: types.APIServer{ - AuthorizationMode: "Node,RBAC", - }, - } - g.Expect(types.ClusterConfigFromBootstrapConfig(&bootstrapConfig)).To(Equal(expectedConfig)) -} - -func TestFalseRBAC(t *testing.T) { - g := NewWithT(t) - // Ensure false rbac yields open authz - bootstrapConfig := apiv1.BootstrapConfig{ - EnableRBAC: vals.Pointer(false), - } - expectedConfig := types.ClusterConfig{ - APIServer: types.APIServer{ - AuthorizationMode: "AlwaysAllow", - }, - } - g.Expect(types.ClusterConfigFromBootstrapConfig(&bootstrapConfig)).To(Equal(expectedConfig)) -} - -func TestSetDefaults(t *testing.T) { - g := NewWithT(t) - clusterConfig := types.ClusterConfig{} - - // Set defaults - expectedConfig := types.ClusterConfig{ - Network: types.Network{ 
- PodCIDR: "10.1.0.0/16", - ServiceCIDR: "10.152.183.0/24", - }, - APIServer: types.APIServer{ - SecurePort: 6443, - AuthorizationMode: "Node,RBAC", - }, - K8sDqlite: types.K8sDqlite{ - Port: 9000, - }, - Kubelet: types.Kubelet{ - ClusterDomain: "cluster.local", - }, - DNS: types.DNS{ - UpstreamNameservers: []string{"/etc/resolv.conf"}, - }, - LocalStorage: types.LocalStorage{ - LocalPath: "/var/snap/k8s/common/rawfile-storage", - ReclaimPolicy: "Delete", - SetDefault: vals.Pointer(true), - }, - LoadBalancer: types.LoadBalancer{ - L2Enabled: vals.Pointer(true), - }, - } - - clusterConfig.SetDefaults() - g.Expect(clusterConfig).To(Equal(expectedConfig)) -} - -type mergeClusterConfigTestCase struct { - name string - old types.ClusterConfig - new types.ClusterConfig - expectResult types.ClusterConfig - expectErr bool -} - -func generateMergeClusterConfigTestCases(field string, changeAllowed bool, val1 any, val2 any, update func(*types.ClusterConfig, any)) []mergeClusterConfigTestCase { - var cfgZero, cfgOne, cfgTwo types.ClusterConfig - update(&cfgOne, val1) - update(&cfgTwo, val2) - - return []mergeClusterConfigTestCase{ - { - name: fmt.Sprintf("%s/Set", field), - new: cfgOne, - expectResult: cfgOne, - expectErr: false, - }, - { - name: fmt.Sprintf("%s/Keep", field), - old: cfgOne, - new: cfgZero, - expectResult: cfgOne, - }, - { - name: fmt.Sprintf("%s/Update", field), - old: cfgOne, - new: cfgTwo, - expectResult: cfgTwo, - expectErr: !changeAllowed, - }, - } -} - -func TestMergeClusterConfig(t *testing.T) { - for _, tcs := range [][]mergeClusterConfigTestCase{ - generateMergeClusterConfigTestCases("CACert", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.CACert = v.(string) }), - generateMergeClusterConfigTestCases("CAKey", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.CAKey = v.(string) }), - generateMergeClusterConfigTestCases("K8sDqliteCert", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.K8sDqliteCert = v.(string) }), - generateMergeClusterConfigTestCases("K8sDqliteKey", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.K8sDqliteKey = v.(string) }), - generateMergeClusterConfigTestCases("APIServerKubeletClientCert", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.APIServerKubeletClientCert = v.(string) }), - generateMergeClusterConfigTestCases("APIServerKubeletClientKey", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.APIServerKubeletClientKey = v.(string) }), - generateMergeClusterConfigTestCases("FrontProxyCACert", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.FrontProxyCACert = v.(string) }), - generateMergeClusterConfigTestCases("FrontProxyCAKey", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.FrontProxyCAKey = v.(string) }), - generateMergeClusterConfigTestCases("DatastoreCA", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.DatastoreCACert = v.(string) }), - generateMergeClusterConfigTestCases("DatastoreClientCert", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.DatastoreClientCert = v.(string) }), - generateMergeClusterConfigTestCases("DatastoreClientKey", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Certificates.DatastoreClientKey = v.(string) }), - generateMergeClusterConfigTestCases("AuthorizationMode", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.APIServer.AuthorizationMode = v.(string) }), - 
generateMergeClusterConfigTestCases("ServiceAccountKey", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.APIServer.ServiceAccountKey = v.(string) }), - generateMergeClusterConfigTestCases("PodCIDR", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Network.PodCIDR = v.(string) }), - generateMergeClusterConfigTestCases("ServiceCIDR", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Network.ServiceCIDR = v.(string) }), - generateMergeClusterConfigTestCases("Datastore", false, "v1", "v2", func(c *types.ClusterConfig, v any) { c.APIServer.Datastore = v.(string) }), - generateMergeClusterConfigTestCases("DatastoreURL", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.APIServer.DatastoreURL = v.(string) }), - generateMergeClusterConfigTestCases("ClusterDNS", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Kubelet.ClusterDNS = v.(string) }), - generateMergeClusterConfigTestCases("ClusterDomain", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Kubelet.ClusterDomain = v.(string) }), - generateMergeClusterConfigTestCases("CloudProvider", true, "v1", "v2", func(c *types.ClusterConfig, v any) { c.Kubelet.CloudProvider = v.(string) }), - generateMergeClusterConfigTestCases("SecurePort", false, 6443, 16443, func(c *types.ClusterConfig, v any) { c.APIServer.SecurePort = v.(int) }), - generateMergeClusterConfigTestCases("K8sDqlitePort", false, 6443, 16443, func(c *types.ClusterConfig, v any) { c.K8sDqlite.Port = v.(int) }), - } { - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) - - result, err := types.MergeClusterConfig(tc.old, tc.new) - if tc.expectErr { - g.Expect(err).ToNot(BeNil()) - } else { - g.Expect(err).To(BeNil()) - g.Expect(result).To(Equal(tc.expectResult)) - } - }) - } - } + g.Expect(types.ClusterConfig{}.Empty()).To(BeTrue()) } diff --git a/src/k8s/pkg/k8sd/types/cluster_config_util.go b/src/k8s/pkg/k8sd/types/cluster_config_util.go new file mode 100644 index 000000000..6216e642f --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_util.go @@ -0,0 +1,48 @@ +package types + +import ( + "fmt" + "slices" +) + +func mergeField[T comparable](old *T, new *T, allowChange bool) (*T, error) { + // old value is not set, use new + if old == nil { + return new, nil + } + // new value is not set, or same as old + if new == nil || *new == *old { + return old, nil + } + + // both values are not-empty + if !allowChange { + return nil, fmt.Errorf("value has changed") + } + return new, nil +} + +func mergeSliceField[T comparable](old *[]T, new *[]T, allowChange bool) (*[]T, error) { + // old value is not set, use new + if old == nil { + return new, nil + } + // new value is not set, or same as old + if new == nil || slices.Equal(*new, *old) { + return old, nil + } + + // both values are not-empty + if !allowChange { + return nil, fmt.Errorf("value has changed") + } + return new, nil +} + +func getField[T any](val *T) T { + if val != nil { + return *val + } + var zero T + return zero +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_util_test.go b/src/k8s/pkg/k8sd/types/cluster_config_util_test.go new file mode 100644 index 000000000..bd9d377b1 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_util_test.go @@ -0,0 +1,142 @@ +package types + +import ( + "testing" + + "github.com/canonical/k8s/pkg/utils/vals" + . 
"github.com/onsi/gomega" +) + +func Test_mergeField(t *testing.T) { + t.Run("string", func(t *testing.T) { + for _, tc := range []struct { + name string + old *string + new *string + allowChange bool + expectErr bool + expectVal *string + }{ + {name: "keep-empty"}, + {name: "set-empty", new: vals.Pointer("val"), expectVal: vals.Pointer("val")}, + {name: "keep-old", old: vals.Pointer("val"), expectVal: vals.Pointer("val")}, + {name: "update", old: vals.Pointer("val"), new: vals.Pointer("newVal"), allowChange: true, expectVal: vals.Pointer("newVal")}, + {name: "update-not-allowed", old: vals.Pointer("val"), new: vals.Pointer("newVal"), expectErr: true}, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + result, err := mergeField(tc.old, tc.new, tc.allowChange) + switch { + case tc.expectErr: + g.Expect(err).ToNot(BeNil()) + case tc.expectVal == nil: + g.Expect(err).To(BeNil()) + g.Expect(result).To(BeNil()) + case tc.expectVal != nil: + g.Expect(err).To(BeNil()) + g.Expect(*result).To(Equal(*tc.expectVal)) + } + }) + } + }) + + t.Run("int", func(t *testing.T) { + for _, tc := range []struct { + name string + old *int + new *int + allowChange bool + expectErr bool + expectVal *int + }{ + {name: "keep-empty"}, + {name: "set-empty", new: vals.Pointer(100), expectVal: vals.Pointer(100)}, + {name: "keep-old", old: vals.Pointer(100), expectVal: vals.Pointer(100)}, + {name: "update", old: vals.Pointer(100), new: vals.Pointer(200), allowChange: true, expectVal: vals.Pointer(200)}, + {name: "update-not-allowed", old: vals.Pointer(100), new: vals.Pointer(200), expectErr: true}, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + result, err := mergeField(tc.old, tc.new, tc.allowChange) + switch { + case tc.expectErr: + g.Expect(err).ToNot(BeNil()) + case tc.expectVal == nil: + g.Expect(err).To(BeNil()) + g.Expect(result).To(BeNil()) + case tc.expectVal != nil: + g.Expect(err).To(BeNil()) + g.Expect(*result).To(Equal(*tc.expectVal)) + } + }) + } + }) + + t.Run("bool", func(t *testing.T) { + for _, tc := range []struct { + name string + old *bool + new *bool + allowChange bool + expectErr bool + expectVal *bool + }{ + {name: "keep-empty"}, + {name: "set-empty", new: vals.Pointer(true), expectVal: vals.Pointer(true)}, + {name: "keep-old", old: vals.Pointer(false), expectVal: vals.Pointer(false)}, + {name: "disable", old: vals.Pointer(true), new: vals.Pointer(false), allowChange: true, expectVal: vals.Pointer(false)}, + {name: "enable", old: vals.Pointer(false), new: vals.Pointer(true), allowChange: true, expectVal: vals.Pointer(true)}, + {name: "disable-not-allowed", old: vals.Pointer(true), new: vals.Pointer(false), expectErr: true}, + {name: "enable-not-allowed", old: vals.Pointer(false), new: vals.Pointer(true), expectErr: true}, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + result, err := mergeField(tc.old, tc.new, tc.allowChange) + switch { + case tc.expectErr: + g.Expect(err).ToNot(BeNil()) + case tc.expectVal == nil: + g.Expect(err).To(BeNil()) + g.Expect(result).To(BeNil()) + case tc.expectVal != nil: + g.Expect(err).To(BeNil()) + g.Expect(*result).To(Equal(*tc.expectVal)) + } + }) + } + }) +} + +func Test_mergeSliceField(t *testing.T) { + t.Run("[]string", func(t *testing.T) { + for _, tc := range []struct { + name string + old *[]string + new *[]string + allowChange bool + expectErr bool + expectVal *[]string + }{ + {name: "keep-empty"}, + {name: "set-empty", new: vals.Pointer([]string{"val"}), expectVal: vals.Pointer([]string{"val"})}, + {name: 
"keep-old", old: vals.Pointer([]string{"val"}), expectVal: vals.Pointer([]string{"val"})}, + {name: "update", old: vals.Pointer([]string{"val"}), new: vals.Pointer([]string{"newVal"}), allowChange: true, expectVal: vals.Pointer([]string{"newVal"})}, + {name: "update-not-allowed", old: vals.Pointer([]string{"val"}), new: vals.Pointer([]string{"newVal"}), expectErr: true}, + } { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + result, err := mergeSliceField(tc.old, tc.new, tc.allowChange) + switch { + case tc.expectErr: + g.Expect(err).ToNot(BeNil()) + case tc.expectVal == nil: + g.Expect(err).To(BeNil()) + g.Expect(result).To(BeNil()) + case tc.expectVal != nil: + g.Expect(err).To(BeNil()) + g.Expect(*result).To(Equal(*tc.expectVal)) + } + }) + } + }) +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_validate.go b/src/k8s/pkg/k8sd/types/cluster_config_validate.go new file mode 100644 index 000000000..c59902e29 --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_validate.go @@ -0,0 +1,90 @@ +package types + +import ( + "fmt" + "net" + "strings" +) + +func validateCIDRs(cidrString string) error { + cidrs := strings.Split(cidrString, ",") + if v := len(cidrs); v != 1 && v != 2 { + return fmt.Errorf("must contain 1 or 2 CIDRs, but found %d instead", v) + } + for _, cidr := range cidrs { + if _, _, err := net.ParseCIDR(cidr); err != nil { + return fmt.Errorf("%q is not a valid CIDR: %w", cidr, err) + } + } + return nil +} + +// Validate that a ClusterConfig does not have conflicting or incompatible options. +func (c *ClusterConfig) Validate() error { + // check: validate that PodCIDR and ServiceCIDR are configured + if err := validateCIDRs(c.Network.GetPodCIDR()); err != nil { + return fmt.Errorf("invalid pod CIDR: %w", err) + } + if err := validateCIDRs(c.Network.GetServiceCIDR()); err != nil { + return fmt.Errorf("invalid service CIDR: %w", err) + } + + // check: ensure network is enabled if any of ingress, gateway, load-balancer are enabled + if !c.Network.GetEnabled() { + if c.Gateway.GetEnabled() { + return fmt.Errorf("gateway requires network to be enabled") + } + if c.LoadBalancer.GetEnabled() { + return fmt.Errorf("load-balancer requires network to be enabled") + } + if c.Ingress.GetEnabled() { + return fmt.Errorf("ingress requires network to be enabled") + } + } + + // check: load-balancer CIDRs + for _, cidr := range c.LoadBalancer.GetCIDRs() { + if _, _, err := net.ParseCIDR(cidr); err != nil { + return fmt.Errorf("load-balancer.cidrs contains an invalid CIDR %q: %w", cidr, err) + } + } + + // check: load-balancer BGP mode configuration + if c.LoadBalancer.GetBGPMode() { + if c.LoadBalancer.GetBGPLocalASN() == 0 { + return fmt.Errorf("load-balancer.bgp-local-asn must be set when load-balancer.bgp-mode is enabled") + } + if c.LoadBalancer.GetBGPPeerAddress() == "" { + return fmt.Errorf("load-balancer.bgp-peer-address must be set when load-balancer.bgp-mode is enabled") + } + if c.LoadBalancer.GetBGPPeerPort() == 0 { + return fmt.Errorf("load-balancer.bgp-peer-port must be set when load-balancer.bgp-mode is enabled") + } + if c.LoadBalancer.GetBGPPeerASN() == 0 { + return fmt.Errorf("load-balancer.bgp-peer-asn must be set when load-balancer.bgp-mode is enabled") + } + } + + // check: local-storage.reclaim-policy should be one of 3 values + switch c.LocalStorage.GetReclaimPolicy() { + case "", "Retain", "Recycle", "Delete": + default: + return fmt.Errorf("local-storage.reclaim-policy must be one of: Retrain, Recycle, Delete") + } + + // check: local-storage.local-path 
must be set if enabled + if c.LocalStorage.GetEnabled() && c.LocalStorage.GetLocalPath() == "" { + return fmt.Errorf("local-storage.local-path must be set when local-storage is enabled") + } + + // check: ensure cluster DNS is a valid IP address + if v := c.Kubelet.GetClusterDNS(); v != "" { + if net.ParseIP(v) == nil { + return fmt.Errorf("dns.service-ip must be a valid IP address") + } + + // TODO: ensure dns.service-ip is part of new.Network.ServiceCIDR + } + + return nil +} diff --git a/src/k8s/pkg/k8sd/types/cluster_config_validate_test.go b/src/k8s/pkg/k8sd/types/cluster_config_validate_test.go new file mode 100644 index 000000000..acc93088f --- /dev/null +++ b/src/k8s/pkg/k8sd/types/cluster_config_validate_test.go @@ -0,0 +1,56 @@ +package types_test + +import ( + "testing" + + "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/utils/vals" + . "github.com/onsi/gomega" +) + +func TestValidateCIDR(t *testing.T) { + for _, tc := range []struct { + cidr string + expectErr bool + }{ + {cidr: "10.1.0.0/16"}, + {cidr: "2001:0db8::/32"}, + {cidr: "10.1.0.0/16,2001:0db8::/32"}, + {cidr: "", expectErr: true}, + {cidr: "bananas", expectErr: true}, + {cidr: "fd01::/64,fd02::/64,fd03::/64", expectErr: true}, + } { + t.Run(tc.cidr, func(t *testing.T) { + t.Run("Pod", func(t *testing.T) { + g := NewWithT(t) + config := types.ClusterConfig{ + Network: types.Network{ + PodCIDR: vals.Pointer(tc.cidr), + ServiceCIDR: vals.Pointer("10.1.0.0/16"), + }, + } + err := config.Validate() + if tc.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).To(BeNil()) + } + }) + t.Run("Service", func(t *testing.T) { + g := NewWithT(t) + config := types.ClusterConfig{ + Network: types.Network{ + PodCIDR: vals.Pointer("10.1.0.0/16"), + ServiceCIDR: vals.Pointer(tc.cidr), + }, + } + err := config.Validate() + if tc.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).To(BeNil()) + } + }) + }) + } +} diff --git a/src/k8s/pkg/k8sd/types/node_config.go b/src/k8s/pkg/k8sd/types/node_config.go deleted file mode 100644 index c5a12af94..000000000 --- a/src/k8s/pkg/k8sd/types/node_config.go +++ /dev/null @@ -1,46 +0,0 @@ -package types - -type NodeConfig struct { - CloudProvider *string - ClusterDNS *string - ClusterDomain *string -} - -func NodeConfigFromMap(data map[string]string) NodeConfig { - nodeConfig := NodeConfig{} - - cloudProvider, ok := data["cloud-provider"] - if ok { - nodeConfig.CloudProvider = &cloudProvider - } - - clusterDNS, ok := data["cluster-dns"] - if ok { - nodeConfig.ClusterDNS = &clusterDNS - } - - clusterDomain, ok := data["cluster-domain"] - if ok { - nodeConfig.ClusterDomain = &clusterDomain - } - - return nodeConfig -} - -func MapFromNodeConfig(nodeConfig NodeConfig) map[string]string { - data := make(map[string]string) - - if nodeConfig.CloudProvider != nil { - data["cloud-provider"] = *nodeConfig.CloudProvider - } - - if nodeConfig.ClusterDNS != nil { - data["cluster-dns"] = *nodeConfig.ClusterDNS - } - - if nodeConfig.ClusterDomain != nil { - data["cluster-domain"] = *nodeConfig.ClusterDomain - } - - return data -} diff --git a/src/k8s/pkg/snap/context.go b/src/k8s/pkg/snap/context.go deleted file mode 100644 index ffd7d3253..000000000 --- a/src/k8s/pkg/snap/context.go +++ /dev/null @@ -1,22 +0,0 @@ -package snap - -import "context" - -type snapContextKey struct{} - -// SnapFromContext extracts the snap instance from the provided context. -// A panic is invoked if there is not snap instance in this context. 
-func SnapFromContext(ctx context.Context) Snap { - snap, ok := ctx.Value(snapContextKey{}).(Snap) - if !ok { - // This should never happen as the main microcluster state context should contain the snap for k8sd. - // Thus, panic is fine here to avoid cumbersome and unnecessary error checks on client side. - panic("There is no snap value in the given context. Make sure that the context is wrapped with snap.ContextWithSnap.") - } - return snap -} - -// ContextWithSnap adds a snap instance to a given context. -func ContextWithSnap(ctx context.Context, snap Snap) context.Context { - return context.WithValue(ctx, snapContextKey{}, snap) -} diff --git a/src/k8s/pkg/snap/snap.go b/src/k8s/pkg/snap/snap.go index 08ac4d6ef..c2e25d3e8 100644 --- a/src/k8s/pkg/snap/snap.go +++ b/src/k8s/pkg/snap/snap.go @@ -196,7 +196,7 @@ func (s *snap) Components() map[string]types.Component { return map[string]types.Component{ "network": { ReleaseName: "ck-network", - ManifestPath: path.Join(s.snapDir, "k8s", "components", "charts", "cilium-1.14.1.tgz"), + ManifestPath: path.Join(s.snapDir, "k8s", "components", "charts", "cilium-1.15.2.tgz"), Namespace: "kube-system", }, "dns": { @@ -213,7 +213,7 @@ func (s *snap) Components() map[string]types.Component { "ingress": {}, "gateway": { ReleaseName: "ck-gateway", - ManifestPath: path.Join(s.snapDir, "k8s", "components", "charts", "gateway-api-0.7.1.tgz"), + ManifestPath: path.Join(s.snapDir, "k8s", "components", "charts", "gateway-api-1.0.0.tgz"), Namespace: "kube-system", }, "load-balancer": { diff --git a/src/k8s/pkg/snap/util/node_test.go b/src/k8s/pkg/snap/util/node_test.go index 5a1b949fc..ba3347fcc 100644 --- a/src/k8s/pkg/snap/util/node_test.go +++ b/src/k8s/pkg/snap/util/node_test.go @@ -18,7 +18,7 @@ func TestIsWorker(t *testing.T) { } t.Run("WorkerFileExists", func(t *testing.T) { - g := NewGomegaWithT(t) + g := NewWithT(t) fname := path.Join(mock.LockFilesDir(), "worker") lock, err := os.Create(fname) @@ -32,7 +32,7 @@ func TestIsWorker(t *testing.T) { t.Run("WorkerFileNotExists", func(t *testing.T) { mock.Mock.LockFilesDir = "/non-existent" - g := NewGomegaWithT(t) + g := NewWithT(t) exists, err := snaputil.IsWorker(mock) g.Expect(err).To(BeNil()) g.Expect(exists).To(BeFalse()) @@ -50,7 +50,7 @@ func TestMarkAsWorkerNode(t *testing.T) { } t.Run("MarkWorker", func(t *testing.T) { - g := NewGomegaWithT(t) + g := NewWithT(t) err := snaputil.MarkAsWorkerNode(mock, true) g.Expect(err).To(BeNil()) @@ -63,7 +63,7 @@ func TestMarkAsWorkerNode(t *testing.T) { }) t.Run("UnmarkWorker", func(t *testing.T) { - g := NewGomegaWithT(t) + g := NewWithT(t) workerFile := path.Join(mock.LockFilesDir(), "worker") _, err := os.Create(workerFile) g.Expect(err).To(BeNil()) @@ -86,28 +86,28 @@ func TestMarkAsWorkerNode_ErrorCases(t *testing.T) { t.Run("FailedToCreateWorkerFile", func(t *testing.T) { mock.Mock.LockFilesDir = "/non-existent" - g := NewGomegaWithT(t) + g := NewWithT(t) err := snaputil.MarkAsWorkerNode(mock, true) g.Expect(err).To(HaveOccurred()) }) t.Run("FailedToRemoveWorkerFile", func(t *testing.T) { mock.Mock.LockFilesDir = "/non-existent" - g := NewGomegaWithT(t) + g := NewWithT(t) err := snaputil.MarkAsWorkerNode(mock, false) g.Expect(err).To(HaveOccurred()) }) t.Run("FailedToChownWorkerFile", func(t *testing.T) { mock.Mock.UID = -1 // Invalid UID to cause chown failure - g := NewGomegaWithT(t) + g := NewWithT(t) err := snaputil.MarkAsWorkerNode(mock, true) g.Expect(err).To(HaveOccurred()) }) t.Run("FailedToChmodWorkerFile", func(t *testing.T) { 
mock.Mock.LockFilesDir = "/non-existent" - g := NewGomegaWithT(t) + g := NewWithT(t) err := snaputil.MarkAsWorkerNode(mock, true) g.Expect(err).To(HaveOccurred()) }) diff --git a/src/k8s/pkg/snap/util/services_test.go b/src/k8s/pkg/snap/util/services_test.go index 273d1fee9..203012d81 100644 --- a/src/k8s/pkg/snap/util/services_test.go +++ b/src/k8s/pkg/snap/util/services_test.go @@ -13,7 +13,7 @@ func TestStartWorkerServices(t *testing.T) { mock := &mock.Snap{ Mock: mock.Mock{}, } - g := NewGomegaWithT(t) + g := NewWithT(t) mock.StartServiceErr = fmt.Errorf("service start failed") @@ -33,7 +33,7 @@ func TestStartControlPlaneServices(t *testing.T) { mock := &mock.Snap{ Mock: mock.Mock{}, } - g := NewGomegaWithT(t) + g := NewWithT(t) mock.StartServiceErr = fmt.Errorf("service start failed") @@ -53,7 +53,7 @@ func TestStartK8sDqliteServices(t *testing.T) { mock := &mock.Snap{ Mock: mock.Mock{}, } - g := NewGomegaWithT(t) + g := NewWithT(t) mock.StartServiceErr = fmt.Errorf("service start failed") @@ -73,7 +73,7 @@ func TestStopControlPlaneServices(t *testing.T) { mock := &mock.Snap{ Mock: mock.Mock{}, } - g := NewGomegaWithT(t) + g := NewWithT(t) mock.StopServiceErr = fmt.Errorf("service stop failed") @@ -93,7 +93,7 @@ func TestStopK8sDqliteServices(t *testing.T) { mock := &mock.Snap{ Mock: mock.Mock{}, } - g := NewGomegaWithT(t) + g := NewWithT(t) mock.StopServiceErr = fmt.Errorf("service stop failed") diff --git a/src/k8s/pkg/utils/database.go b/src/k8s/pkg/utils/database.go index d8a60043d..38ddf18d6 100644 --- a/src/k8s/pkg/utils/database.go +++ b/src/k8s/pkg/utils/database.go @@ -5,10 +5,8 @@ import ( "database/sql" "fmt" - apiv1 "github.com/canonical/k8s/api/v1" "github.com/canonical/k8s/pkg/k8sd/database" "github.com/canonical/k8s/pkg/k8sd/types" - "github.com/canonical/k8s/pkg/utils/vals" "github.com/canonical/microcluster/state" ) @@ -30,99 +28,6 @@ func GetClusterConfig(ctx context.Context, state *state.State) (types.ClusterCon return clusterConfig, nil } -// GetUserFacingClusterConfig returns the public cluster config. 
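The repeated `NewGomegaWithT` to `NewWithT` substitutions in the test hunks above and below are purely mechanical: `NewWithT` is the current name of the same Gomega constructor, and the assertions themselves are unchanged. A minimal, hypothetical test showing the pattern (the test name and assertion are illustrative, not taken from this PR):

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestNewWithTPattern(t *testing.T) {
	// NewWithT binds Gomega assertions to this *testing.T; it replaces the
	// older NewGomegaWithT name without changing behaviour.
	g := NewWithT(t)
	g.Expect([]string{"control-plane", "worker"}).To(ContainElement("worker"))
}
```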
-func GetUserFacingClusterConfig(ctx context.Context, state *state.State) (apiv1.UserFacingClusterConfig, error) { - cfg, err := GetClusterConfig(ctx, state) - if err != nil { - return apiv1.UserFacingClusterConfig{}, fmt.Errorf("failed to get cluster config: %w", err) - } - - userFacing := apiv1.UserFacingClusterConfig{ - Network: &apiv1.NetworkConfig{ - Enabled: vals.Pointer(true), - }, - DNS: &apiv1.DNSConfig{ - Enabled: vals.Pointer(true), - UpstreamNameservers: cfg.DNS.UpstreamNameservers, - ServiceIP: cfg.Kubelet.ClusterDNS, - ClusterDomain: cfg.Kubelet.ClusterDomain, - }, - Ingress: &apiv1.IngressConfig{ - Enabled: vals.Pointer(false), - DefaultTLSSecret: cfg.Ingress.DefaultTLSSecret, - EnableProxyProtocol: vals.Pointer(false), - }, - LoadBalancer: &apiv1.LoadBalancerConfig{ - Enabled: vals.Pointer(false), - CIDRs: cfg.LoadBalancer.CIDRs, - L2Enabled: vals.Pointer(false), - L2Interfaces: cfg.LoadBalancer.L2Interfaces, - BGPEnabled: vals.Pointer(false), - BGPLocalASN: cfg.LoadBalancer.BGPLocalASN, - BGPPeerAddress: cfg.LoadBalancer.BGPPeerAddress, - BGPPeerASN: cfg.LoadBalancer.BGPPeerASN, - BGPPeerPort: cfg.LoadBalancer.BGPPeerPort, - }, - LocalStorage: &apiv1.LocalStorageConfig{ - Enabled: vals.Pointer(false), - LocalPath: cfg.LocalStorage.LocalPath, - ReclaimPolicy: cfg.LocalStorage.ReclaimPolicy, - SetDefault: vals.Pointer(true), - }, - Gateway: &apiv1.GatewayConfig{ - Enabled: vals.Pointer(false), - }, - MetricsServer: &apiv1.MetricsServerConfig{ - Enabled: vals.Pointer(false), - }, - } - - if cfg.Network.Enabled != nil { - userFacing.Network.Enabled = cfg.Network.Enabled - } - - if cfg.DNS.Enabled != nil { - userFacing.DNS.Enabled = cfg.DNS.Enabled - } - - if cfg.Ingress.Enabled != nil { - userFacing.Ingress.Enabled = cfg.Ingress.Enabled - } - - if cfg.LoadBalancer.Enabled != nil { - userFacing.LoadBalancer.Enabled = cfg.LoadBalancer.Enabled - } - - if cfg.LocalStorage.Enabled != nil { - userFacing.LocalStorage.Enabled = cfg.LocalStorage.Enabled - } - - if cfg.Gateway.Enabled != nil { - userFacing.Gateway.Enabled = cfg.Gateway.Enabled - } - - if cfg.MetricsServer.Enabled != nil { - userFacing.MetricsServer.Enabled = cfg.MetricsServer.Enabled - } - - if cfg.Ingress.EnableProxyProtocol != nil { - userFacing.Ingress.EnableProxyProtocol = cfg.Ingress.EnableProxyProtocol - } - - if cfg.LoadBalancer.L2Enabled != nil { - userFacing.LoadBalancer.L2Enabled = cfg.LoadBalancer.L2Enabled - } - - if cfg.LoadBalancer.BGPEnabled != nil { - userFacing.LoadBalancer.BGPEnabled = cfg.LoadBalancer.BGPEnabled - } - - if cfg.LocalStorage.SetDefault != nil { - userFacing.LocalStorage.SetDefault = cfg.LocalStorage.SetDefault - } - return userFacing, nil -} - // CheckWorkerExists is a convenience wrapper around the database call to check if a worker node entry exists. 
func CheckWorkerExists(ctx context.Context, state *state.State, name string) (bool, error) { var exists bool diff --git a/src/k8s/pkg/utils/errors/errors_test.go b/src/k8s/pkg/utils/errors/errors_test.go index 324255ee1..3562695c5 100644 --- a/src/k8s/pkg/utils/errors/errors_test.go +++ b/src/k8s/pkg/utils/errors/errors_test.go @@ -9,7 +9,7 @@ import ( ) func TestDeeplyUnwrapError(t *testing.T) { - g := gomega.NewGomegaWithT(t) + g := gomega.NewWithT(t) t.Run("when error is not wrapped", func(t *testing.T) { err := errors.New("test error") diff --git a/src/k8s/pkg/utils/file.go b/src/k8s/pkg/utils/file.go index 998b1847b..58fa18bca 100644 --- a/src/k8s/pkg/utils/file.go +++ b/src/k8s/pkg/utils/file.go @@ -123,17 +123,6 @@ func FileExists(path ...string) (bool, error) { return true, nil } -// ValueInSlice returns true if key is in list. -func ValueInSlice[T comparable](key T, list []T) bool { - for _, entry := range list { - if entry == key { - return true - } - } - - return false -} - var ErrUnknownMount = errors.New("mount is unknown") // GetMountPath returns the first mountpath for a given filesystem type. diff --git a/src/k8s/pkg/utils/k8s/endpoints.go b/src/k8s/pkg/utils/k8s/endpoints.go index fbc29c0a3..8277426b9 100644 --- a/src/k8s/pkg/utils/k8s/endpoints.go +++ b/src/k8s/pkg/utils/k8s/endpoints.go @@ -5,22 +5,32 @@ import ( "fmt" "sort" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" ) // GetKubeAPIServerEndpoints retrieves the known kube-apiserver endpoints of the cluster. // GetKubeAPIServerEndpoints returns an error if the list of endpoints is empty. func (c *Client) GetKubeAPIServerEndpoints(ctx context.Context) ([]string, error) { - endpoint, err := c.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{}) + var endpoints *v1.Endpoints + var err error + err = retry.OnError(retry.DefaultBackoff, func(err error) bool { return true }, func() error { + endpoints, err = c.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{}) + if err != nil { + return err + } + return nil + }) if err != nil { return nil, fmt.Errorf("failed to get endpoints for kubernetes service: %w", err) } - if endpoint == nil { + if endpoints == nil { return nil, fmt.Errorf("endpoints for kubernetes service not found") } - addresses := make([]string, 0, len(endpoint.Subsets)) - for _, subset := range endpoint.Subsets { + addresses := make([]string, 0, len(endpoints.Subsets)) + for _, subset := range endpoints.Subsets { portNumber := 6443 for _, port := range subset.Ports { if port.Name == "https" { diff --git a/src/k8s/pkg/utils/k8s/node_test.go b/src/k8s/pkg/utils/k8s/node_test.go index e72c99d77..3baef9b82 100644 --- a/src/k8s/pkg/utils/k8s/node_test.go +++ b/src/k8s/pkg/utils/k8s/node_test.go @@ -15,7 +15,7 @@ import ( ) func TestDeleteNode(t *testing.T) { - g := gomega.NewGomegaWithT(t) + g := gomega.NewWithT(t) t.Run("node deletion is successful", func(t *testing.T) { clientset := fake.NewSimpleClientset() diff --git a/tests/integration/requirements-dev.txt b/tests/integration/requirements-dev.txt index d3be1dfd2..a66721ae0 100644 --- a/tests/integration/requirements-dev.txt +++ b/tests/integration/requirements-dev.txt @@ -1,4 +1,4 @@ -black==23.3.0 +black==24.3.0 codespell==2.2.4 flake8==6.0.0 isort==5.12.0 diff --git a/tests/integration/tests/test_cilium_e2e.py b/tests/integration/tests/test_cilium_e2e.py index 4d4959477..cf2735eac 100644 --- a/tests/integration/tests/test_cilium_e2e.py +++ 
b/tests/integration/tests/test_cilium_e2e.py @@ -12,7 +12,7 @@ ARCH = platform.machine() CILIUM_CLI_ARCH_MAP = {"aarch64": "arm64", "x86_64": "amd64"} -CILIUM_CLI_VERSION = "v0.15.19" +CILIUM_CLI_VERSION = "v0.16.3" CILIUM_CLI_TAR_GZ = f"https://github.com/cilium/cilium-cli/releases/download/{CILIUM_CLI_VERSION}/cilium-linux-{CILIUM_CLI_ARCH_MAP.get(ARCH)}.tar.gz" # noqa diff --git a/tests/integration/tests/test_clustering.py b/tests/integration/tests/test_clustering.py index a8fd035ee..5a4e03abe 100644 --- a/tests/integration/tests/test_clustering.py +++ b/tests/integration/tests/test_clustering.py @@ -11,7 +11,7 @@ @pytest.mark.node_count(2) -def test_clustering(instances: List[harness.Instance]): +def test_control_plane_nodes(instances: List[harness.Instance]): cluster_node = instances[0] joining_node = instances[1] diff --git a/tests/integration/tests/test_gateway.py b/tests/integration/tests/test_gateway.py index f31bc0f83..93faa65d0 100644 --- a/tests/integration/tests/test_gateway.py +++ b/tests/integration/tests/test_gateway.py @@ -91,8 +91,6 @@ def test_gateway(instances: List[harness.Instance]): ) gateway_http_port = p.stdout.decode().replace("'", "") - p = instance.exec( - ["curl", f"localhost:{gateway_http_port}"], - capture_output=True, - ) - assert "Welcome to nginx!" in p.stdout.decode() + util.stubbornly(retries=5, delay_s=5).on(instance).until( + lambda p: "Welcome to nginx!" in p.stdout.decode() + ).exec(["curl", f"localhost:{gateway_http_port}"]) diff --git a/tests/integration/tests/test_ingress.py b/tests/integration/tests/test_ingress.py index 13554de74..bfa19bc8c 100644 --- a/tests/integration/tests/test_ingress.py +++ b/tests/integration/tests/test_ingress.py @@ -95,8 +95,6 @@ def test_ingress(instances: List[harness.Instance]): ] ) - p = instance.exec( - ["curl", f"localhost:{ingress_http_port}", "-H", "Host: foo.bar.com"], - capture_output=True, - ) - assert "Welcome to nginx!" in p.stdout.decode() + util.stubbornly(retries=5, delay_s=5).on(instance).until( + lambda p: "Welcome to nginx!" in p.stdout.decode() + ).exec(["curl", f"localhost:{ingress_http_port}", "-H", "Host: foo.bar.com"]) diff --git a/tests/integration/tests/test_loadbalancer.py b/tests/integration/tests/test_loadbalancer.py index 1c8c6d89e..6c1740676 100644 --- a/tests/integration/tests/test_loadbalancer.py +++ b/tests/integration/tests/test_loadbalancer.py @@ -74,7 +74,9 @@ def test_loadbalancer(instances: List[harness.Instance]): excluded_ips=[instance_default_ip, tester_instance_default_ip], ) - instance.exec(["k8s", "set", f"load-balancer.cidrs={lb_cidr}"]) + instance.exec( + ["k8s", "set", f"load-balancer.cidrs={lb_cidr}", "load-balancer.l2-mode=true"] + ) instance.exec(["k8s", "enable", "load-balancer"]) util.stubbornly(retries=3, delay_s=1).on(instance).exec(