From 74cf293378859cfcd7a3701bcf7fe945f3925bb1 Mon Sep 17 00:00:00 2001 From: Mohamed Chiheb Ben Jemaa Date: Mon, 3 Jun 2024 16:39:53 +0200 Subject: [PATCH] =?UTF-8?q?=F0=9F=94=A5=20Add=20external=20credentials=20(?= =?UTF-8?q?#215)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api/v1alpha1/proxmoxcluster_types.go | 44 +++ api/v1alpha1/zz_generated.deepcopy.go | 15 + cmd/main.go | 27 +- ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 46 +++ ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 16 ++ .../default/proxmox-credentials-secret.yaml | 8 +- docs/Usage.md | 49 +++- pkg/scope/cluster.go | 68 ++++- pkg/scope/cluster_test.go | 106 ++++++- .../cluster-template-external-creds.yaml | 263 ++++++++++++++++++ 10 files changed, 607 insertions(+), 35 deletions(-) create mode 100644 templates/cluster-template-external-creds.yaml diff --git a/api/v1alpha1/proxmoxcluster_types.go b/api/v1alpha1/proxmoxcluster_types.go index f2019131..42f09431 100644 --- a/api/v1alpha1/proxmoxcluster_types.go +++ b/api/v1alpha1/proxmoxcluster_types.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -71,6 +72,11 @@ type ProxmoxClusterSpec struct { // in the configuration and cloning of a proxmox VM. Multiple types of nodes can be specified. // +optional CloneSpec *ProxmoxClusterCloneSpec `json:"cloneSpec,omitempty"` + + // CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not + // supplied then the credentials of the controller will be used. 
+ // +optional + CredentialsRef *corev1.SecretReference `json:"credentialsRef,omitempty"` } // ProxmoxClusterCloneSpec is the configuration pertaining to all items configurable @@ -141,6 +147,44 @@ type ProxmoxClusterStatus struct { // +optional NodeLocations *NodeLocations `json:"nodeLocations,omitempty"` + // FailureReason will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a succinct value suitable + // for machine interpretation. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of ProxmoxCluster + // can be added as events to the ProxmoxCluster object and/or logged in the + // controller's output. + // +optional + FailureReason *errors.ClusterStatusError `json:"failureReason,omitempty"` + + // FailureMessage will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. 
Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of ProxmoxMachines + // can be added as events to the ProxmoxCluster object and/or logged in the + // controller's output. + // +optional + FailureMessage *string `json:"failureMessage,omitempty"` + // Conditions defines current service state of the ProxmoxCluster. // +optional Conditions clusterv1.Conditions `json:"conditions,omitempty"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index e049639a..83e27c55 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -354,6 +354,11 @@ func (in *ProxmoxClusterSpec) DeepCopyInto(out *ProxmoxClusterSpec) { *out = new(ProxmoxClusterCloneSpec) (*in).DeepCopyInto(*out) } + if in.CredentialsRef != nil { + in, out := &in.CredentialsRef, &out.CredentialsRef + *out = new(v1.SecretReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterSpec. 
@@ -379,6 +384,16 @@ func (in *ProxmoxClusterStatus) DeepCopyInto(out *ProxmoxClusterStatus) { *out = new(NodeLocations) (*in).DeepCopyInto(*out) } + if in.FailureReason != nil { + in, out := &in.FailureReason, &out.FailureReason + *out = new(errors.ClusterStatusError) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make(v1beta1.Conditions, len(*in)) diff --git a/cmd/main.go b/cmd/main.go index 00219971..12f4af08 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -27,7 +27,6 @@ import ( "github.com/go-logr/logr" "github.com/luthermonson/go-proxmox" - "github.com/pkg/errors" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -185,8 +184,13 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, client capmox.Clien } func setupProxmoxClient(ctx context.Context, logger logr.Logger) (capmox.Client, error) { + // we return nil if the env variables are not set + // so the proxmoxcontroller can create the client later from spec.credentialsRef + // or fail the cluster if no credentials found + if ProxmoxURL == "" || ProxmoxTokenID == "" || ProxmoxSecret == "" { + return nil, nil + } // TODO, check if we need to delete tls config - // You can disable security check for a client: tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec } @@ -214,23 +218,4 @@ func initFlagsAndEnv(fs *pflag.FlagSet) { "If true, run webhook server alongside manager") feature.MutableGates.AddFlag(fs) - - err := validate() - if err != nil { - setupLog.Error(err, "validate fails") - os.Exit(1) - } -} - -func validate() error { - if ProxmoxURL == "" { - return errors.New("required variable `PROXMOX_URL` is not set") - } - if ProxmoxTokenID == "" { - return errors.New("required variable `PROXMOX_TOKEN` is not set") - } - if 
ProxmoxSecret == "" { - return errors.New("required variable `PROXMOX_SECRET` is not set") - } - return nil } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index ba648320..7099caa8 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -561,6 +561,21 @@ spec: - host - port type: object + credentialsRef: + description: CredentialsRef is a reference to a Secret that contains + the credentials to use for provisioning this cluster. If not supplied + then the credentials of the controller will be used. + properties: + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic dnsServers: description: DNSServers contains information about nameservers used by the machines. @@ -690,6 +705,37 @@ spec: - type type: object type: array + failureMessage: + description: "FailureMessage will be set in the event that there is + a terminal problem reconciling the Machine and will contain a more + verbose string suitable for logging and human consumption. \n This + field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over time (like + service outages), but instead indicate that something is fundamentally + wrong with the Machine's spec or the configuration of the controller, + and that manual intervention is required. Examples of terminal errors + would be invalid combinations of settings in the spec, values that + are unsupported by the controller, or the responsible controller + itself being critically misconfigured. 
\n Any transient errors that + occur during the reconciliation of ProxmoxMachines can be added + as events to the ProxmoxCluster object and/or logged in the controller's + output." + type: string + failureReason: + description: "FailureReason will be set in the event that there is + a terminal problem reconciling the Machine and will contain a succinct + value suitable for machine interpretation. \n This field should + not be set for transitive errors that a controller faces that are + expected to be fixed automatically over time (like service outages), + but instead indicate that something is fundamentally wrong with + the Machine's spec or the configuration of the controller, and that + manual intervention is required. Examples of terminal errors would + be invalid combinations of settings in the spec, values that are + unsupported by the controller, or the responsible controller itself + being critically misconfigured. \n Any transient errors that occur + during the reconciliation of ProxmoxCluster can be added as events + to the ProxmoxCluster object and/or logged in the controller's output." + type: string inClusterIpPoolRef: description: InClusterIPPoolRef is the reference to the created in-cluster IP pool. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 44231824..fe696561 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -610,6 +610,22 @@ spec: - host - port type: object + credentialsRef: + description: CredentialsRef is a reference to a Secret that + contains the credentials to use for provisioning this cluster. + If not supplied then the credentials of the controller will + be used. + properties: + name: + description: name is unique within a namespace to reference + a secret resource. 
+ type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic dnsServers: description: DNSServers contains information about nameservers used by the machines. diff --git a/config/default/proxmox-credentials-secret.yaml b/config/default/proxmox-credentials-secret.yaml index 66113ca1..6855e699 100644 --- a/config/default/proxmox-credentials-secret.yaml +++ b/config/default/proxmox-credentials-secret.yaml @@ -1,9 +1,11 @@ --- apiVersion: v1 stringData: - secret: ${PROXMOX_SECRET} - token: ${PROXMOX_TOKEN} - url: ${PROXMOX_URL} + secret: ${PROXMOX_SECRET=""} + token: ${PROXMOX_TOKEN=""} + url: ${PROXMOX_URL=""} kind: Secret metadata: name: manager-credentials + labels: + platform.ionos.com/secret-type: "proxmox-credentials" diff --git a/docs/Usage.md b/docs/Usage.md index d0a9492f..67c8650f 100644 --- a/docs/Usage.md +++ b/docs/Usage.md @@ -120,8 +120,11 @@ Once you have access to a management cluster, you can initialize Cluster API wit clusterctl init --infrastructure proxmox --ipam in-cluster --core cluster-api:v1.6.1 ``` +**Note:** The Proxmox credentials are optional when installing the provider, +but they are required when creating a cluster. + ### Create a Workload Cluster -In order to create a new cluster, you need to generate a cluster manifest. +To create a new cluster, you need to generate a cluster manifest. 
```bash $ clusterctl generate cluster proxmox-quickstart \ @@ -167,15 +170,57 @@ For templates using `CNI`s you're required to create `ConfigMaps` to make `Clust We provide the following templates: | Flavor | Tepmlate File | CRS File | -|---------------------| -----------------------------------------------------|-----------------------------------------------------------| +|---------------------|------------------------------------------------------|-----------------------------------------------------------| | cilium | templates/cluster-template-cilium.yaml | templates/crs/cni/cilium.yaml | | calico | templates/cluster-template-calico.yaml | templates/crs/cni/calico.yaml | | multiple-vlans | templates/cluster-template-multiple-vlans.yaml | - | | default | templates/cluster-template.yaml | - | | cilium loadbalancer | templates/cluster-template-cilium-load-balancer.yaml | templates/crs/cni/cilium.yaml, templates/crs/metallb.yaml | +| external-creds | templates/cluster-template-external-creds.yaml | | For more information about advanced clusters please check our [advanced setups docs](advanced-setups.md). +#### External Credentials + +The `external-creds` flavor is used to create a cluster with external credentials. +This is useful when you want to use different Proxmox Datacenters. + +you will need these environment variables to generate a cluster with external credentials: + +```env +PROXMOX_URL: "https://pve.example:8006" # The Proxmox VE host +PROXMOX_TOKEN: "root@pam!capi" # The Proxmox VE TokenID for authentication +PROXMOX_SECRET: "REDACTED" # The secret associated with the TokenID +``` + +However, to use external-credentials in your own Cluster manifests, you need to create a secret +and reference it in the cluster manifest. +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxCluster +metadata: + name: "my-cluster" +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: 6443 + # ... 
+ credentialsRef: + name: "my-cluster-proxmox-credentials" +--- +apiVersion: v1 +stringData: + secret: ${PROXMOX_SECRET} + token: ${PROXMOX_TOKEN} + url: ${PROXMOX_URL} +kind: Secret +metadata: + name: my-cluster-proxmox-credentials + labels: + # Custom IONOS Label + platform.ionos.com/secret-type: "proxmox-credentials" +``` + #### Flavor with Cilium CNI Before this cluster can be deployed, `cilium` needs to be configured. As a first step we need to generate a manifest. Simply use our makefile: diff --git a/pkg/scope/cluster.go b/pkg/scope/cluster.go index 7dee8fba..0551946f 100644 --- a/pkg/scope/cluster.go +++ b/pkg/scope/cluster.go @@ -19,10 +19,17 @@ package scope import ( "context" + "crypto/tls" + "net/http" "github.com/go-logr/logr" + "github.com/luthermonson/go-proxmox" "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clustererrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" @@ -30,7 +37,8 @@ import ( infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" - "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox" + capmox "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox" + "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/goproxmox" ) // ClusterScopeParams defines the input parameters used to create a new Scope. 
@@ -39,7 +47,7 @@ type ClusterScopeParams struct { Logger *logr.Logger Cluster *clusterv1.Cluster ProxmoxCluster *infrav1alpha1.ProxmoxCluster - ProxmoxClient proxmox.Client + ProxmoxClient capmox.Client ControllerName string IPAMHelper *ipam.Helper } @@ -53,7 +61,7 @@ type ClusterScope struct { Cluster *clusterv1.Cluster ProxmoxCluster *infrav1alpha1.ProxmoxCluster - ProxmoxClient proxmox.Client + ProxmoxClient capmox.Client controllerName string IPAMHelper *ipam.Helper @@ -74,9 +82,6 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { if params.IPAMHelper == nil { return nil, errors.New("IPAMHelper is required when creating a ClusterScope") } - if params.ProxmoxClient == nil { - return nil, errors.New("ProxmoxClient is required when creating a ClusterScope") - } if params.Logger == nil { logger := log.FromContext(context.Background()) params.Logger = &logger @@ -99,9 +104,60 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { clusterScope.patchHelper = helper + if clusterScope.ProxmoxClient == nil && clusterScope.ProxmoxCluster.Spec.CredentialsRef == nil { + // Fail the cluster if no credentials found. 
+ // set failure reason + clusterScope.ProxmoxCluster.Status.FailureMessage = ptr.To("No credentials found, ProxmoxCluster missing credentialsRef") + clusterScope.ProxmoxCluster.Status.FailureReason = ptr.To(clustererrors.InvalidConfigurationClusterError) + + if err = clusterScope.Close(); err != nil { + return nil, err + } + return nil, errors.New("No credentials found, ProxmoxCluster missing credentialsRef") + } else if clusterScope.ProxmoxCluster.Spec.CredentialsRef != nil { + // using proxmoxcluster.spec.credentialsRef + pmoxClient, err := clusterScope.setupProxmoxClient(context.TODO()) + if err != nil { + return nil, errors.Wrap(err, "Unable to initialize ProxmoxClient") + } + clusterScope.ProxmoxClient = pmoxClient + } + return clusterScope, nil } +func (s *ClusterScope) setupProxmoxClient(ctx context.Context) (capmox.Client, error) { + // get the credentials secret + secret := corev1.Secret{} + err := s.client.Get(ctx, client.ObjectKey{ + Namespace: s.ProxmoxCluster.Spec.CredentialsRef.Namespace, + Name: s.ProxmoxCluster.Spec.CredentialsRef.Name, + }, &secret) + if err != nil { + if apierrors.IsNotFound(err) { + // set failure reason + s.ProxmoxCluster.Status.FailureMessage = ptr.To("credentials secret not found") + s.ProxmoxCluster.Status.FailureReason = ptr.To(clustererrors.InvalidConfigurationClusterError) + } + return nil, errors.Wrap(err, "failed to get credentials secret") + } + + token := string(secret.Data["token"]) + tokenSecret := string(secret.Data["secret"]) + url := string(secret.Data["url"]) + + // TODO, check if we need to delete tls config + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec + } + + httpClient := &http.Client{Transport: tr} + return goproxmox.NewAPIClient(ctx, *s.Logger, url, + proxmox.WithHTTPClient(httpClient), + proxmox.WithAPIToken(token, tokenSecret), + ) +} + // Name returns the CAPI cluster name. 
func (s *ClusterScope) Name() string { return s.Cluster.Name diff --git a/pkg/scope/cluster_test.go b/pkg/scope/cluster_test.go index 5884d4b8..16cf9775 100644 --- a/pkg/scope/cluster_test.go +++ b/pkg/scope/cluster_test.go @@ -17,14 +17,23 @@ limitations under the License. package scope import ( + "context" "testing" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" - "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" - "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/goproxmox" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clustererrors "sigs.k8s.io/cluster-api/errors" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" + "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/goproxmox" ) func TestNewClusterScope_MissingParams(t *testing.T) { @@ -48,3 +57,94 @@ func TestNewClusterScope_MissingParams(t *testing.T) { }) } } + +func TestNewClusterScope_MissingProxmoxClient(t *testing.T) { + client := getFakeClient(t) + + proxmoxCluster := &infrav1alpha1.ProxmoxCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: infrav1alpha1.GroupVersion.String(), + Kind: "ProxmoxCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "proxmoxcluster", + Namespace: "default", + }, + Spec: infrav1alpha1.ProxmoxClusterSpec{ + AllowedNodes: []string{"pve", "pve-2"}, + }, + } + + tests := []struct { + name string + params ClusterScopeParams + }{ + {"missing proxmox client in ref", ClusterScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, ProxmoxCluster: 
proxmoxCluster, IPAMHelper: &ipam.Helper{}}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := NewClusterScope(test.params) + require.Error(t, err) + require.Equal(t, proxmoxCluster.Status.FailureReason, ptr.To(clustererrors.InvalidConfigurationClusterError)) + }) + } +} + +func TestNewClusterScope_SetupProxmoxClient(t *testing.T) { + client := getFakeClient(t) + + proxmoxCluster := &infrav1alpha1.ProxmoxCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: infrav1alpha1.GroupVersion.String(), + Kind: "ProxmoxCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "proxmoxcluster", + Namespace: "default", + }, + Spec: infrav1alpha1.ProxmoxClusterSpec{ + AllowedNodes: []string{"pve", "pve-2"}, + CredentialsRef: &corev1.SecretReference{ + Name: "test-secret", + Namespace: "default", + }, + }, + } + + params := ClusterScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, ProxmoxCluster: proxmoxCluster, IPAMHelper: &ipam.Helper{}} + _, err := NewClusterScope(params) + require.Error(t, err) + + creds := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + StringData: map[string]string{ + "url": "https://localhost:8006", + "token": "test-token", + "secret": "test-secret", + }, + } + + err = client.Create(context.Background(), &creds) + require.NoError(t, err) + + _, err = NewClusterScope(params) + require.Error(t, err) +} + +func getFakeClient(t *testing.T) ctrlclient.Client { + scheme := runtime.NewScheme() + + // Register client-go scheme with the scheme + err := clientgoscheme.AddToScheme(scheme) + require.NoError(t, err) + err = clusterv1.AddToScheme(scheme) + require.NoError(t, err) + err = infrav1alpha1.AddToScheme(scheme) + require.NoError(t, err) + + return fake.NewClientBuilder().WithScheme(scheme).Build() +} diff --git a/templates/cluster-template-external-creds.yaml b/templates/cluster-template-external-creds.yaml new file mode 100644 index 00000000..a605f713 --- 
/dev/null +++ b/templates/cluster-template-external-creds.yaml @@ -0,0 +1,263 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: 6443 + ipv4Config: + addresses: ${NODE_IP_RANGES} + prefix: ${IP_PREFIX} + gateway: ${GATEWAY} + dnsServers: ${DNS_SERVERS} + allowedNodes: ${ALLOWED_NODES:=[]} + credentialsRef: + name: "${CLUSTER_NAME}-proxmox-credentials" +--- +apiVersion: v1 +stringData: + secret: ${PROXMOX_SECRET} + token: ${PROXMOX_TOKEN} + url: ${PROXMOX_URL} +kind: Secret +metadata: + name: "${CLUSTER_NAME}-proxmox-credentials" + labels: + platform.ionos.com/secret-type: "proxmox-credentials" +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: ProxmoxMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + users: + - name: root + sshAuthorizedKeys: [${VM_SSH_KEYS}] + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value:
"true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.7.1 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - localhost + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - path: /etc/kube-vip-prepare.sh + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' 
-f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + permissions: "0700" + preKubeadmCommands: + - /etc/kube-vip-prepare.sh + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'" + version: "${KUBERNETES_VERSION}" +--- +kind: ProxmoxMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + sourceNode: "${PROXMOX_SOURCENODE}" + templateID: ${TEMPLATE_VMID} + format: "qcow2" + full: true + numSockets: ${NUM_SOCKETS:=2} + numCores: ${NUM_CORES:=4} + memoryMiB: ${MEMORY_MIB:=16384} + disks: + bootVolume: + disk: ${BOOT_VOLUME_DEVICE} + sizeGb: ${BOOT_VOLUME_SIZE:=100} + network: + default: + bridge: ${BRIDGE} + model: virtio +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-workers" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + metadata: + labels: + node-role.kubernetes.io/node: "" + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxMachineTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + sourceNode: "${PROXMOX_SOURCENODE}" + templateID: ${TEMPLATE_VMID} + format: "qcow2" + full: true + numSockets: ${NUM_SOCKETS:=2} + numCores: ${NUM_CORES:=4} + memoryMiB: ${MEMORY_MIB:=16384} + disks: + bootVolume: + disk: ${BOOT_VOLUME_DEVICE} + sizeGb: ${BOOT_VOLUME_SIZE:=100} + network: + default: + bridge: ${BRIDGE} + model: virtio +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: [${VM_SSH_KEYS}] + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'"