From ed2d3ce77b4b48960fc40f5edcd00b2c4f1bd3e5 Mon Sep 17 00:00:00 2001 From: Philipp Born Date: Thu, 28 Nov 2024 15:07:50 +0100 Subject: [PATCH] feat: allow template selection based on tags * `sourceNode + templateID` and `templateSelector` are mutually exclusive * automatically detects both `sourceNode` + `templateID` * errors out if anything but one (1) VM template with desired flags was found --- api/v1alpha1/proxmoxmachine_types.go | 25 +- api/v1alpha1/zz_generated.deepcopy.go | 25 ++ ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 16 +- ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 16 +- ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 15 +- ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 16 +- docs/advanced-setups.md | 7 + envfile.example | 1 + internal/service/vmservice/vm.go | 11 + internal/webhook/proxmoxmachine_webhook.go | 38 +++ .../webhook/proxmoxmachine_webhook_test.go | 9 + pkg/proxmox/client.go | 1 + pkg/proxmox/goproxmox/api_client.go | 46 ++++ pkg/proxmox/goproxmox/api_client_test.go | 91 +++++++ pkg/proxmox/proxmoxtest/mock_client.go | 60 +++++ templates/cluster-template-auto-image.yaml | 250 ++++++++++++++++++ 16 files changed, 618 insertions(+), 9 deletions(-) create mode 100644 templates/cluster-template-auto-image.yaml diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index d2fd7119..639f3a43 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -162,12 +162,16 @@ type VirtualMachineCloneSpec struct { // will be cloned onto the same node as SourceNode. // // +kubebuilder:validation:MinLength=1 + // +optional SourceNode string `json:"sourceNode"` // TemplateID the vm_template vmid used for cloning a new VM. // +optional TemplateID *int32 `json:"templateID,omitempty"` + // +optional + TemplateSelector *TemplateSelector `json:"templateSelector,omitempty"` + // Description for the new VM. // +optional Description *string `json:"description,omitempty"` @@ -202,6 +206,14 @@ type VirtualMachineCloneSpec struct { Target *string `json:"target,omitempty"` } +// TemplateSelector defines tags for looking up images. +type TemplateSelector struct { + // Specifies all tags to look for, when looking up the VM template. + // + // +kubebuilder:validation:MinItems=1 + MatchTags []string `json:"matchTags"` +} + // NetworkSpec defines the virtual machine's network configuration. type NetworkSpec struct { // Default is the default network device, @@ -526,9 +538,20 @@ func (r *ProxmoxMachine) GetTemplateID() int32 { return -1 } +// GetTemplateSelectorTags get the tags, the desired vm template should have. +func (r *ProxmoxMachine) GetTemplateSelectorTags() []string { + if r.Spec.TemplateSelector != nil && r.Spec.TemplateSelector.MatchTags != nil { + return r.Spec.TemplateSelector.MatchTags + } + return nil +} + // GetNode get the Proxmox node used to provision this machine. func (r *ProxmoxMachine) GetNode() string { - return r.Spec.SourceNode + if r.Spec.SourceNode != "" { + return r.Spec.SourceNode + } + return "" } // FormatSize returns the format required for the Proxmox API. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index fa1493f9..451235a8 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -897,6 +897,26 @@ func (in *Storage) DeepCopy() *Storage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateSelector) DeepCopyInto(out *TemplateSelector) { + *out = *in + if in.MatchTags != nil { + in, out := &in.MatchTags, &out.MatchTags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSelector. +func (in *TemplateSelector) DeepCopy() *TemplateSelector { + if in == nil { + return nil + } + out := new(TemplateSelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VRFDevice) DeepCopyInto(out *VRFDevice) { *out = *in @@ -948,6 +968,11 @@ func (in *VirtualMachineCloneSpec) DeepCopyInto(out *VirtualMachineCloneSpec) { *out = new(int32) **out = **in } + if in.TemplateSelector != nil { + in, out := &in.TemplateSelector, &out.TemplateSelector + *out = new(TemplateSelector) + (*in).DeepCopyInto(*out) + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 6dc1e677..6593a2f1 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -548,13 +548,25 @@ spec: a new VM. format: int32 type: integer + templateSelector: + description: TemplateSelector defines tags for looking up + images. + properties: + matchTags: + description: Specifies all tags to look for, when looking + up the VM template. + items: + type: string + minItems: 1 + type: array + required: + - matchTags + type: object virtualMachineID: description: VirtualMachineID is the Proxmox identifier for the ProxmoxMachine VM. format: int64 type: integer - required: - - sourceNode type: object type: object x-kubernetes-validations: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 1dc4410e..60536363 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -588,13 +588,25 @@ spec: for cloning a new VM. format: int32 type: integer + templateSelector: + description: TemplateSelector defines tags for looking + up images. + properties: + matchTags: + description: Specifies all tags to look for, + when looking up the VM template. + items: + type: string + minItems: 1 + type: array + required: + - matchTags + type: object virtualMachineID: description: VirtualMachineID is the Proxmox identifier for the ProxmoxMachine VM. format: int64 type: integer - required: - - sourceNode type: object type: object x-kubernetes-validations: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index a8c06f61..0a3ee747 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -513,13 +513,24 @@ spec: VM. format: int32 type: integer + templateSelector: + description: TemplateSelector defines tags for looking up images. + properties: + matchTags: + description: Specifies all tags to look for, when looking up the + VM template. 
+ items: + type: string + minItems: 1 + type: array + required: + - matchTags + type: object virtualMachineID: description: VirtualMachineID is the Proxmox identifier for the ProxmoxMachine VM. format: int64 type: integer - required: - - sourceNode type: object x-kubernetes-validations: - message: Must set full=true when specifying format diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index f8967773..3318d386 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -546,13 +546,25 @@ spec: a new VM. format: int32 type: integer + templateSelector: + description: TemplateSelector defines tags for looking up + images. + properties: + matchTags: + description: Specifies all tags to look for, when looking + up the VM template. + items: + type: string + minItems: 1 + type: array + required: + - matchTags + type: object virtualMachineID: description: VirtualMachineID is the Proxmox identifier for the ProxmoxMachine VM. format: int64 type: integer - required: - - sourceNode type: object required: - spec diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md index 8c10f3a2..d24918e8 100644 --- a/docs/advanced-setups.md +++ b/docs/advanced-setups.md @@ -176,6 +176,13 @@ This behaviour can be configured in the `ProxmoxCluster` CR through the field `. For example, setting it to `0` (zero), entirely disables scheduling based on memory. Alternatively, if you set it to any value greater than `0`, the scheduler will treat your host as it would have `${value}%` of memory. In real numbers that would mean, if you have a host with 64GB of memory and set the number to `300`, the scheduler would allow you to provision guests with a total of 192GB memory and therefore overprovision the host. (Use with caution! It's strongly suggested to have memory ballooning configured everywhere.). Or, if you were to set it to `95` for example, it would treat your host as it would only have 60,8GB of memory, and leave the remaining 3,2GB for the host. +## Template lookup based on Proxmox tags + +Our provider is able to look up templates based on their attached tags, for `ProxmoxMachine` resources, that make use of an tag selector. + +For example, you can set the `TEMPLATE_TAGS="tag1,tag2"` environment variable. Your custom image will then be used when using the [auto-image](https://github.com/ionos-cloud/cluster-api-provider-ionoscloud/blob/main/templates/cluster-template-auto-image.yaml) template. + + ## Proxmox RBAC with least privileges For the Proxmox API user/token you create for CAPMOX, these are the minimum required permissions. diff --git a/envfile.example b/envfile.example index c334778d..b111be9c 100644 --- a/envfile.example +++ b/envfile.example @@ -3,6 +3,7 @@ export PROXMOX_TOKEN="" export PROXMOX_SECRET="" export PROXMOX_SOURCENODE="pve" export TEMPLATE_VMID=100 +export TEMPLATE_TAGS="tag1,tag2" export VM_SSH_KEYS="ssh-ed25519 ..., ssh-ed25519 ..." 
export KUBERNETES_VERSION="1.25.1" export CONTROL_PLANE_ENDPOINT_IP=10.10.10.4 diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index a68aeb5b..ca64d954 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -371,6 +371,17 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } templateID := scope.ProxmoxMachine.GetTemplateID() + if templateID == -1 { + var err error + + templateSelectorTags := scope.ProxmoxMachine.GetTemplateSelectorTags() + options.Node, templateID, err = scope.InfraCluster.ProxmoxClient.FindVMTemplateByTags(ctx, templateSelectorTags) + + if err != nil { + scope.SetFailureMessage(err) + return proxmox.VMCloneResponse{}, err + } + } res, err := scope.InfraCluster.ProxmoxClient.CloneVM(ctx, int(templateID), options) if err != nil { return res, err diff --git a/internal/webhook/proxmoxmachine_webhook.go b/internal/webhook/proxmoxmachine_webhook.go index 9134541f..e6c11a4a 100644 --- a/internal/webhook/proxmoxmachine_webhook.go +++ b/internal/webhook/proxmoxmachine_webhook.go @@ -59,6 +59,12 @@ func (p *ProxmoxMachine) ValidateCreate(_ context.Context, obj runtime.Object) ( return warnings, err } + err = validateTemplate(machine) + if err != nil { + warnings = append(warnings, fmt.Sprintf("cannot create proxmox machine %s", machine.GetName())) + return warnings, err + } + return warnings, nil } @@ -75,6 +81,12 @@ func (p *ProxmoxMachine) ValidateUpdate(_ context.Context, _, newObj runtime.Obj return warnings, err } + err = validateTemplate(newMachine) + if err != nil { + warnings = append(warnings, fmt.Sprintf("cannot create proxmox machine %s", newMachine.GetName())) + return warnings, err + } + return warnings, nil } @@ -83,6 +95,32 @@ func (p *ProxmoxMachine) ValidateDelete(_ context.Context, _ runtime.Object) (wa return nil, nil } +func validateTemplate(machine *infrav1.ProxmoxMachine) error { + gk, name := machine.GroupVersionKind().GroupKind(), machine.GetName() + + if (machine.Spec.TemplateID != nil || machine.Spec.SourceNode != "") && (machine.Spec.TemplateSelector != nil) { + return apierrors.NewInvalid( + gk, + name, + field.ErrorList{ + field.Invalid( + field.NewPath("spec"), machine.Spec, "spec.sourceNode AND spec.templateID can not be used in combination with spec.templateSelector"), + }) + } + + if (machine.Spec.TemplateID == nil || machine.Spec.SourceNode == "") && (machine.Spec.TemplateSelector == nil) { + return apierrors.NewInvalid( + gk, + name, + field.ErrorList{ + field.Invalid( + field.NewPath("spec"), machine.Spec, "must define either spec.sourceNode AND spec.templateID, or spec.templateSelector"), + }) + } + + return nil +} + func validateNetworks(machine *infrav1.ProxmoxMachine) error { if machine.Spec.Network == nil { return nil diff --git a/internal/webhook/proxmoxmachine_webhook_test.go b/internal/webhook/proxmoxmachine_webhook_test.go index 025a385f..0f77225c 100644 --- a/internal/webhook/proxmoxmachine_webhook_test.go +++ b/internal/webhook/proxmoxmachine_webhook_test.go @@ -77,6 +77,14 @@ var _ = Describe("Controller Test", func() { machine.Spec.Network.AdditionalDevices[0].InterfaceConfig.Routing.RoutingPolicy[0].Table = nil g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("routing policy [0] requires a table"))) }) + + It("should disallow machine with both sourceNode/templateID AND TemplateSelector", func() { + machine := validProxmoxMachine("test-machine") + machine.Spec.TemplateSelector = 
&infrav1.TemplateSelector{ + MatchTags: []string{"foo", "bar"}, + } + g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("spec.sourceNode AND spec.templateID can not be used in combination with spec.templateSelector"))) + }) }) Context("update proxmox cluster", func() { @@ -111,6 +119,7 @@ func validProxmoxMachine(name string) infrav1.ProxmoxMachine { Spec: infrav1.ProxmoxMachineSpec{ VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ SourceNode: "pve", + TemplateID: ptr.To(int32(1337)), }, NumSockets: 1, NumCores: 1, diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go index 4c5f9431..090b217d 100644 --- a/pkg/proxmox/client.go +++ b/pkg/proxmox/client.go @@ -30,6 +30,7 @@ type Client interface { ConfigureVM(ctx context.Context, vm *proxmox.VirtualMachine, options ...VirtualMachineOption) (*proxmox.Task, error) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.ClusterResource, error) + FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) GetVM(ctx context.Context, nodeName string, vmID int64) (*proxmox.VirtualMachine, error) diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index e9ecef4a..b41c06ba 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "net/url" + "slices" "strings" "github.com/go-logr/logr" @@ -141,6 +142,51 @@ func (c *APIClient) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.C return nil, fmt.Errorf("unable to find VM with ID %d on any of the nodes", vmID) } +// FindVMTemplateByTags tries to find a VMID by its tags across the whole cluster. +func (c *APIClient) FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) { + vmTemplates := make([]*proxmox.ClusterResource, 0) + + sortedTags := make([]string, len(templateTags)) + for i, tag := range templateTags { + // Proxmox VM tags are always lowercase + sortedTags[i] = strings.ToLower(tag) + } + slices.Sort(sortedTags) + uniqueTags := slices.Compact(sortedTags) + + cluster, err := c.Cluster(ctx) + if err != nil { + return "", -1, fmt.Errorf("cannot get cluster status: %w", err) + } + + vmResources, err := cluster.Resources(ctx, "vm") + if err != nil { + return "", -1, fmt.Errorf("could not list vm resources: %w", err) + } + + for _, vm := range vmResources { + if vm.Template == 0 { + continue + } + if len(vm.Tags) == 0 { + continue + } + + vmTags := strings.Split(vm.Tags, ";") + slices.Sort(vmTags) + + if slices.Equal(vmTags, uniqueTags) { + vmTemplates = append(vmTemplates, vm) + } + } + + if n := len(vmTemplates); n != 1 { + return "", -1, fmt.Errorf("found %d VM templates with tags %q", n, templateTags) + } + + return vmTemplates[0].Node, int32(vmTemplates[0].VMID), nil +} + // DeleteVM deletes a VM based on the nodeName and vmID. func (c *APIClient) DeleteVM(ctx context.Context, nodeName string, vmID int64) (*proxmox.Task, error) { // A vmID can not be lower than 100. 
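// A minimal standalone sketch (not one of the patched files) of the matching
// semantics implemented by FindVMTemplateByTags above: selector tags are
// lower-cased, sorted and de-duplicated, then compared against a template's
// complete tag list, so a template matches only when its tag set is exactly
// equal to the requested one; a subset or superset of tags does not match.
// Identifiers below (matchesTemplateTags and the sample tag values) are
// illustrative only.

package main

import (
	"fmt"
	"slices"
	"strings"
)

// matchesTemplateTags reports whether a Proxmox template whose tags are given
// as a semicolon-separated string matches the requested selector tags.
func matchesTemplateTags(vmTags string, selector []string) bool {
	want := make([]string, len(selector))
	for i, tag := range selector {
		want[i] = strings.ToLower(tag) // Proxmox stores VM tags lower-cased
	}
	slices.Sort(want)
	want = slices.Compact(want) // drop duplicate selector tags

	have := strings.Split(vmTags, ";")
	slices.Sort(have)

	return slices.Equal(have, want)
}

func main() {
	fmt.Println(matchesTemplateTags("capmox;template;v1.28.3", []string{"TEMPLATE", "capmox", "v1.28.3"})) // true
	fmt.Println(matchesTemplateTags("capmox;template;v1.28.3", []string{"template", "capmox"}))            // false: subset does not match
}
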
diff --git a/pkg/proxmox/goproxmox/api_client_test.go b/pkg/proxmox/goproxmox/api_client_test.go index 64042444..e1c07d88 100644 --- a/pkg/proxmox/goproxmox/api_client_test.go +++ b/pkg/proxmox/goproxmox/api_client_test.go @@ -371,6 +371,97 @@ func TestProxmoxAPIClient_FindVMResource(t *testing.T) { } } +func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { + proxmoxClusterResources := proxmox.ClusterResources{ + &proxmox.ClusterResource{VMID: 101, Name: "k8s-node01", Node: "capmox01", Tags: ""}, + &proxmox.ClusterResource{VMID: 102, Name: "k8s-node02", Node: "capmox02", Tags: ""}, + &proxmox.ClusterResource{VMID: 201, Name: "ubuntu-22.04-k8s-v1.28.3", Node: "capmox01", Tags: "template;capmox;v1.28.3", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 202, Name: "ubuntu-22.04-k8s-v1.30.2", Node: "capmox02", Tags: "capmox;template;v1.30.2", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 301, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 302, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, + } + tests := []struct { + name string + vmTags []string + fails bool + err string + vmTemplateNode string + vmTemplateID int32 + }{ + { + name: "find-template", + vmTags: []string{"template", "capmox", "v1.28.3"}, + fails: false, + err: "", + vmTemplateNode: "capmox01", + vmTemplateID: 201, + }, + { + // Proxmox VM tags are always lowercase + name: "find-template-uppercase", + vmTags: []string{"TEMPLATE", "CAPMOX", "v1.28.3"}, + fails: false, + err: "", + vmTemplateNode: "capmox01", + vmTemplateID: 201, + }, + { + name: "find-template-unordered", + vmTags: []string{"template", "capmox", "v1.30.2"}, + fails: false, + err: "", + vmTemplateNode: "capmox02", + vmTemplateID: 202, + }, + { + name: "find-template-duplicate-tag", + vmTags: []string{"template", "capmox", "capmox", "v1.30.2"}, + fails: false, + err: "", + vmTemplateNode: "capmox02", + vmTemplateID: 202, + }, + { + name: "find-multiple-templates", + vmTags: []string{"template", "capmox"}, + fails: true, + err: "found 0 VM templates with tags [\"template\" \"capmox\"]", + vmTemplateID: 69, + vmTemplateNode: "nice", + }, + { + name: "find-multiple-templates", + vmTags: []string{"template", "capmox", "v1.29.2"}, + fails: true, + err: "found 2 VM templates with tags [\"template\" \"capmox\" \"v1.29.2\"]", + vmTemplateID: 69, + vmTemplateNode: "nice", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client := newTestClient(t) + + httpmock.RegisterResponder(http.MethodGet, `=~/cluster/status`, + newJSONResponder(200, proxmox.NodeStatuses{})) + httpmock.RegisterResponder(http.MethodGet, `=~/cluster/resources`, + newJSONResponder(200, proxmoxClusterResources)) + + vmTemplateNode, vmTemplateID, err := client.FindVMTemplateByTags(context.Background(), test.vmTags) + + if test.fails { + require.Error(t, err) + require.Equal(t, test.err, err.Error()) + } else { + require.NoError(t, err) + require.Equal(t, vmTemplateID, test.vmTemplateID) + require.Equal(t, vmTemplateNode, test.vmTemplateNode) + } + }) + } +} + func TestProxmoxAPIClient_DeleteVM(t *testing.T) { tests := []struct { name string diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index 8492503f..1931246c 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -312,6 +312,66 @@ func (_c 
*MockClient_FindVMResource_Call) RunAndReturn(run func(context.Context, return _c } +// FindVMTemplateByTags provides a mock function with given fields: ctx, templateTags +func (_m *MockClient) FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) { + ret := _m.Called(ctx, templateTags) + + var r0 string + var r1 int32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []string) (string, int32, error)); ok { + return rf(ctx, templateTags) + } + if rf, ok := ret.Get(0).(func(context.Context, []string) string); ok { + r0 = rf(ctx, templateTags) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, []string) int32); ok { + r1 = rf(ctx, templateTags) + } else { + r1 = ret.Get(1).(int32) + } + + if rf, ok := ret.Get(2).(func(context.Context, []string) error); ok { + r2 = rf(ctx, templateTags) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockClient_FindVMTemplateByTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindVMTemplateByTags' +type MockClient_FindVMTemplateByTags_Call struct { + *mock.Call +} + +// FindVMTemplateByTags is a helper method to define mock.On call +// - ctx context.Context +// - templateTags []string +func (_e *MockClient_Expecter) FindVMTemplateByTags(ctx interface{}, templateTags interface{}) *MockClient_FindVMTemplateByTags_Call { + return &MockClient_FindVMTemplateByTags_Call{Call: _e.mock.On("FindVMTemplateByTags", ctx, templateTags)} +} + +func (_c *MockClient_FindVMTemplateByTags_Call) Run(run func(ctx context.Context, templateTags []string)) *MockClient_FindVMTemplateByTags_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]string)) + }) + return _c +} + +func (_c *MockClient_FindVMTemplateByTags_Call) Return(_a0 string, _a1 int32, _a2 error) *MockClient_FindVMTemplateByTags_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockClient_FindVMTemplateByTags_Call) RunAndReturn(run func(context.Context, []string) (string, int32, error)) *MockClient_FindVMTemplateByTags_Call { + _c.Call.Return(run) + return _c +} + // GetReservableMemoryBytes provides a mock function with given fields: ctx, nodeName, nodeMemoryAdjustment func (_m *MockClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) { ret := _m.Called(ctx, nodeName, nodeMemoryAdjustment) diff --git a/templates/cluster-template-auto-image.yaml b/templates/cluster-template-auto-image.yaml new file mode 100644 index 00000000..b21a89ea --- /dev/null +++ b/templates/cluster-template-auto-image.yaml @@ -0,0 +1,250 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: 6443 + ipv4Config: + addresses: ${NODE_IP_RANGES} + prefix: ${IP_PREFIX} + gateway: ${GATEWAY} + dnsServers: ${DNS_SERVERS} + allowedNodes: ${ALLOWED_NODES:=[]} +--- +kind: KubeadmControlPlane +apiVersion: 
controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: ProxmoxMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + users: + - name: root + sshAuthorizedKeys: [${VM_SSH_KEYS}] + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.7.1 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - localhost + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - path: /etc/kube-vip-prepare.sh + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' 
-f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + permissions: "0700" + preKubeadmCommands: + - /etc/kube-vip-prepare.sh + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'" + version: "${KUBERNETES_VERSION}" +--- +kind: ProxmoxMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + templateSelector: + matchTags: [${TEMPLATE_TAGS}] + format: "qcow2" + full: true + numSockets: ${NUM_SOCKETS:=2} + numCores: ${NUM_CORES:=4} + memoryMiB: ${MEMORY_MIB:=16384} + disks: + bootVolume: + disk: ${BOOT_VOLUME_DEVICE} + sizeGb: ${BOOT_VOLUME_SIZE:=100} + network: + default: + bridge: ${BRIDGE} + model: virtio +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-workers" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + metadata: + labels: + node-role.kubernetes.io/node: "" + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxMachineTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + templateSelector: + matchTags: [${TEMPLATE_TAGS}] + format: "qcow2" + full: true + numSockets: ${NUM_SOCKETS:=2} + numCores: ${NUM_CORES:=4} + memoryMiB: ${MEMORY_MIB:=16384} + disks: + bootVolume: + disk: ${BOOT_VOLUME_DEVICE} + sizeGb: ${BOOT_VOLUME_SIZE:=100} + network: + default: + bridge: ${BRIDGE} + model: virtio +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: [${VM_SSH_KEYS}] + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'"