From 5bfddfcbe6924635425268bb350719b7118981fd Mon Sep 17 00:00:00 2001
From: Igor Velichkovich <ivelichkovich@gmail.com>
Date: Tue, 16 Apr 2024 17:00:52 -0700
Subject: [PATCH] WIP controller prototype

---
 .gitignore                                    |   2 +
 Dockerfile                                    |   1 +
 .../node_slice_controller.go                  |  90 +++
 cmd/whereabouts.go                            |  14 +-
 ...hereabouts.cni.cncf.io_nodeslicepools.yaml |  76 ++
 hack/build-go.sh                              |   2 +
 .../v1alpha1/nodeslicepool_types.go           |  52 ++
 .../v1alpha1/register.go                      |   2 +
 .../v1alpha1/zz_generated.deepcopy.go         | 109 +++
 .../v1alpha1/fake/fake_nodeslicepool.go       | 141 ++++
 .../fake_whereabouts.cni.cncf.io_client.go    |   4 +
 .../v1alpha1/generated_expansion.go           |   2 +
 .../v1alpha1/nodeslicepool.go                 | 194 +++++
 .../whereabouts.cni.cncf.io_client.go         |   5 +
 .../informers/externalversions/generic.go     |   2 +
 .../v1alpha1/interface.go                     |   7 +
 .../v1alpha1/nodeslicepool.go                 |  89 +++
 .../v1alpha1/expansion_generated.go           |   8 +
 .../v1alpha1/nodeslicepool.go                 |  98 +++
 pkg/config/config.go                          |  11 +
 pkg/iphelpers/iphelpers.go                    |  59 ++
 pkg/node-controller/controller.go             | 676 ++++++++++++++++++
 pkg/node-controller/signals/signals.go        |  28 +
 pkg/node-controller/signals/signals_posix.go  |   8 +
 pkg/storage/kubernetes/ipam.go                |  93 ++-
 pkg/types/types.go                            |   6 +
 26 files changed, 1763 insertions(+), 16 deletions(-)
 create mode 100644 cmd/nodeslicecontroller/node_slice_controller.go
 create mode 100644 doc/crds/whereabouts.cni.cncf.io_nodeslicepools.yaml
 create mode 100644 pkg/api/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool_types.go
 create mode 100644 pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/fake/fake_nodeslicepool.go
 create mode 100644 pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
 create mode 100644 pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
 create mode 100644 pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
 create mode 100644 pkg/node-controller/controller.go
 create mode 100644 pkg/node-controller/signals/signals.go
 create mode 100644 pkg/node-controller/signals/signals_posix.go

diff --git a/.gitignore b/.gitignore
index 1eecf4af1..a10637778 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,8 @@
 
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
+.idea
+kind/
 
 bin/
 /github.com/
diff --git a/Dockerfile b/Dockerfile
index a76f26af1..ae6dd8eba 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -9,5 +9,6 @@ FROM alpine:latest
 LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/whereabouts
 COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/whereabouts .
 COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/ip-control-loop .
+COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/node-slice-controller .
 COPY script/install-cni.sh .
 CMD ["/install-cni.sh"]
diff --git a/cmd/nodeslicecontroller/node_slice_controller.go b/cmd/nodeslicecontroller/node_slice_controller.go
new file mode 100644
index 000000000..0e03a78a2
--- /dev/null
+++ b/cmd/nodeslicecontroller/node_slice_controller.go
@@ -0,0 +1,90 @@
+package main
+
+import (
+	"flag"
+	nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned"
+	nadinformers "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions"
+	clientset "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
+	informers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions"
+	node_controller "github.com/k8snetworkplumbingwg/whereabouts/pkg/node-controller"
+
+	"time"
+
+	kubeinformers "k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog/v2"
+
+	"github.com/k8snetworkplumbingwg/whereabouts/pkg/node-controller/signals"
+)
+
+var (
+	masterURL  string
+	kubeconfig string
+)
+
+// TODO: leader election
+func main() {
+	klog.InitFlags(nil)
+	flag.Parse()
+
+	// set up signals so we handle the shutdown signal gracefully
+	ctx := signals.SetupSignalHandler()
+	logger := klog.FromContext(ctx)
+
+	cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
+	if err != nil {
+		logger.Error(err, "Error building kubeconfig")
+		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
+	}
+
+	kubeClient, err := kubernetes.NewForConfig(cfg)
+	if err != nil {
+		logger.Error(err, "Error building kubernetes clientset")
+		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
+	}
+
+	whereaboutsClient, err := clientset.NewForConfig(cfg)
+	if err != nil {
+		logger.Error(err, "Error building kubernetes clientset")
+		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
+	}
+
+	nadClient, err := nadclient.NewForConfig(cfg)
+	if err != nil {
+		logger.Error(err, "Error building kubernetes clientset")
+		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
+	}
+
+	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
+	whereaboutsInformerFactory := informers.NewSharedInformerFactory(whereaboutsClient, time.Second*30)
+	nadInformerFactory := nadinformers.NewSharedInformerFactory(nadClient, time.Second*30)
+
+	controller := node_controller.NewController(
+		ctx,
+		kubeClient,
+		whereaboutsClient,
+		nadClient,
+		kubeInformerFactory.Core().V1().Nodes(),
+		whereaboutsInformerFactory.Whereabouts().V1alpha1().NodeSlicePools(),
+		whereaboutsInformerFactory.Whereabouts().V1alpha1().IPPools(),
+		nadInformerFactory.K8sCniCncfIo().V1().NetworkAttachmentDefinitions(),
+	)
+
+	// Notice that there is no need to run the Start methods in a separate goroutine (i.e. go kubeInformerFactory.Start(ctx.Done())).
+	// Start is non-blocking and runs all registered informers in a dedicated goroutine.
+	kubeInformerFactory.Start(ctx.Done())
+	whereaboutsInformerFactory.Start(ctx.Done())
+	nadInformerFactory.Start(ctx.Done())
+
+	//TODO: make workers configurable via flag, what is a sane value here? How will concurrency work?
+	if err = controller.Run(ctx, 1); err != nil {
+		logger.Error(err, "Error running controller")
+		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
+	}
+}
+
+func init() {
+	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
+	flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
+}
diff --git a/cmd/whereabouts.go b/cmd/whereabouts.go
index de664307b..fd77123ef 100644
--- a/cmd/whereabouts.go
+++ b/cmd/whereabouts.go
@@ -64,7 +64,7 @@ func cmdCheck(args *skel.CmdArgs) error {
 	return fmt.Errorf("CNI CHECK method is not implemented")
 }
 
-func cmdAdd(args *skel.CmdArgs, client *kubernetes.KubernetesIPAM, cniVersion string) error {
+func cmdAdd(args *skel.CmdArgs, client *kubernetes.KubernetesIPAM, cniVersion string) (err error) {
 	// Initialize our result, and assign DNS & routing.
 	result := &current.Result{}
 	result.DNS = client.Config.DNS
@@ -76,7 +76,12 @@ func cmdAdd(args *skel.CmdArgs, client *kubernetes.KubernetesIPAM, cniVersion st
 	ctx, cancel := context.WithTimeout(context.Background(), types.AddTimeLimit)
 	defer cancel()
 
-	newips, err := kubernetes.IPManagement(ctx, types.Allocate, client.Config, client)
+	if client.Config.Range != "" && client.Config.NodeSliceFastRange != "" {
+		logging.Errorf("Configuration error, cannot use both NodeSliceFastRange and regular Range")
+		return fmt.Errorf("configuration error, cannot use both NodeSliceFastRange and regular Range")
+	}
+
+	newips, err = kubernetes.IPManagement(ctx, types.Allocate, client.Config, client)
 	if err != nil {
 		logging.Errorf("Error at storage engine: %s", err)
 		return fmt.Errorf("error at storage engine: %w", err)
@@ -104,6 +109,11 @@ func cmdDel(args *skel.CmdArgs, client *kubernetes.KubernetesIPAM) error {
 	ctx, cancel := context.WithTimeout(context.Background(), types.DelTimeLimit)
 	defer cancel()
 
+	if client.Config.Range != "" && client.Config.NodeSliceFastRange != "" {
+		logging.Errorf("Configuration error, cannot use both NodeSliceFastRange and regular Range")
+		return fmt.Errorf("configuration error, cannot use both NodeSliceFastRange and regular Range")
+	}
+
 	_, err := kubernetes.IPManagement(ctx, types.Deallocate, client.Config, client)
 	if err != nil {
 		logging.Verbosef("WARNING: Problem deallocating IP: %s", err)
diff --git a/doc/crds/whereabouts.cni.cncf.io_nodeslicepools.yaml b/doc/crds/whereabouts.cni.cncf.io_nodeslicepools.yaml
new file mode 100644
index 000000000..fa4f6a2d7
--- /dev/null
+++ b/doc/crds/whereabouts.cni.cncf.io_nodeslicepools.yaml
@@ -0,0 +1,76 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.4.1
+  creationTimestamp: null
+  name: nodeslicepools.whereabouts.cni.cncf.io
+spec:
+  group: whereabouts.cni.cncf.io
+  names:
+    kind: NodeSlicePool
+    listKind: NodeSlicePoolList
+    plural: nodeslicepools
+    singular: nodeslicepool
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: NodeSlicePool is the Schema for the nodeslicepools API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: NodeSlicePoolSpec defines the desired state of NodeSlicePool
+            properties:
+              range:
+                description: Range is an RFC 4632/4291-style string that represents
+                  an IP address and prefix length in CIDR notation. This is the
+                  full range from which each node is allocated a subset.
+                type: string
+              sliceSize:
+                type: string
+            required:
+            - range
+            - sliceSize
+            type: object
+          status:
+            description: NodeSlicePoolStatus defines the observed state of NodeSlicePool
+            properties:
+              allocations:
+                items:
+                  properties:
+                    nodeName:
+                      type: string
+                    sliceRange:
+                      type: string
+                  required:
+                  - nodeName
+                  - sliceRange
+                  type: object
+                type: array
+            required:
+            - allocations
+            type: object
+        type: object
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/hack/build-go.sh b/hack/build-go.sh
index 0d615169e..f8b9f78e1 100755
--- a/hack/build-go.sh
+++ b/hack/build-go.sh
@@ -46,3 +46,5 @@ GLDFLAGS="${GLDFLAGS} ${VERSION_LDFLAGS}"
 
 CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/${cmd} cmd/${cmd}.go
 CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/ip-control-loop cmd/controlloop/*.go
+CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/node-slice-controller cmd/nodeslicecontroller/*.go
+
diff --git a/pkg/api/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool_types.go b/pkg/api/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool_types.go
new file mode 100644
index 000000000..03b75af24
--- /dev/null
+++ b/pkg/api/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool_types.go
@@ -0,0 +1,52 @@
+package v1alpha1
+
+import (
+	"net"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NodeSlicePoolSpec defines the desired state of NodeSlicePool
+type NodeSlicePoolSpec struct {
+	// Range is an RFC 4632/4291-style string that represents an IP address and prefix length in CIDR notation.
+	// This is the full range from which each node is allocated a subset.
+	Range string `json:"range"`
+
+	// SliceSize is the CIDR prefix length (e.g. "/24") of the slice carved out of Range for each node.
+	SliceSize string `json:"sliceSize"`
+}
+
+// NodeSlicePoolStatus defines the observed state of NodeSlicePool
+type NodeSlicePoolStatus struct {
+	Allocations []NodeSliceAllocation `json:"allocations"`
+}
+
+type NodeSliceAllocation struct {
+	NodeName   string `json:"nodeName"`
+	SliceRange string `json:"sliceRange"`
+}
+
+// ParseCIDR parses the Range of the NodeSlicePool
+func (i NodeSlicePool) ParseCIDR() (net.IP, *net.IPNet, error) {
+	return net.ParseCIDR(i.Spec.Range)
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+
+// NodeSlicePool is the Schema for the nodeslicepools API
+type NodeSlicePool struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   NodeSlicePoolSpec   `json:"spec,omitempty"`
+	Status NodeSlicePoolStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// NodeSlicePoolList contains a list of NodeSlicePool
+type NodeSlicePoolList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NodeSlicePool `json:"items"`
+}
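For illustration, this is the shape these types give a NodeSlicePool object; the names, namespace, and CIDRs below are made up:

```go
package main

import (
	"fmt"

	"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pool := v1alpha1.NodeSlicePool{
		ObjectMeta: metav1.ObjectMeta{Name: "my-network", Namespace: "kube-system"},
		Spec: v1alpha1.NodeSlicePoolSpec{
			Range:     "10.0.0.0/8", // the whole range the controller carves up
			SliceSize: "/24",        // each node receives one /24 slice
		},
		Status: v1alpha1.NodeSlicePoolStatus{
			Allocations: []v1alpha1.NodeSliceAllocation{
				{NodeName: "node-1", SliceRange: "10.0.0.0/24"},
				{NodeName: "node-2", SliceRange: "10.0.1.0/24"},
			},
		},
	}
	fmt.Println(pool.Spec.Range, pool.Status.Allocations[0].SliceRange)
}
```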
diff --git a/pkg/api/whereabouts.cni.cncf.io/v1alpha1/register.go b/pkg/api/whereabouts.cni.cncf.io/v1alpha1/register.go
index b3a1b6f27..1c96503bd 100644
--- a/pkg/api/whereabouts.cni.cncf.io/v1alpha1/register.go
+++ b/pkg/api/whereabouts.cni.cncf.io/v1alpha1/register.go
@@ -58,6 +58,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&IPPoolList{},
 		&OverlappingRangeIPReservation{},
 		&OverlappingRangeIPReservationList{},
+		&NodeSlicePool{},
+		&NodeSlicePoolList{},
 	)
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 	return nil
diff --git a/pkg/api/whereabouts.cni.cncf.io/v1alpha1/zz_generated.deepcopy.go b/pkg/api/whereabouts.cni.cncf.io/v1alpha1/zz_generated.deepcopy.go
index 18d1e4b1f..c23bbddc4 100644
--- a/pkg/api/whereabouts.cni.cncf.io/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/api/whereabouts.cni.cncf.io/v1alpha1/zz_generated.deepcopy.go
@@ -104,6 +104,115 @@ func (in *IPPoolSpec) DeepCopy() *IPPoolSpec {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSliceAllocation) DeepCopyInto(out *NodeSliceAllocation) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSliceAllocation.
+func (in *NodeSliceAllocation) DeepCopy() *NodeSliceAllocation {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSliceAllocation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSlicePool) DeepCopyInto(out *NodeSlicePool) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSlicePool.
+func (in *NodeSlicePool) DeepCopy() *NodeSlicePool {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSlicePool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeSlicePool) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSlicePoolList) DeepCopyInto(out *NodeSlicePoolList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]NodeSlicePool, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSlicePoolList.
+func (in *NodeSlicePoolList) DeepCopy() *NodeSlicePoolList {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSlicePoolList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeSlicePoolList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSlicePoolSpec) DeepCopyInto(out *NodeSlicePoolSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSlicePoolSpec.
+func (in *NodeSlicePoolSpec) DeepCopy() *NodeSlicePoolSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSlicePoolSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSlicePoolStatus) DeepCopyInto(out *NodeSlicePoolStatus) {
+	*out = *in
+	if in.Allocations != nil {
+		in, out := &in.Allocations, &out.Allocations
+		*out = make([]NodeSliceAllocation, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSlicePoolStatus.
+func (in *NodeSlicePoolStatus) DeepCopy() *NodeSlicePoolStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSlicePoolStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *OverlappingRangeIPReservation) DeepCopyInto(out *OverlappingRangeIPReservation) {
 	*out = *in
diff --git a/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/fake/fake_nodeslicepool.go b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/fake/fake_nodeslicepool.go
new file mode 100644
index 000000000..4f250df4d
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/fake/fake_nodeslicepool.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2024 The Kubernetes Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+	"context"
+
+	v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	testing "k8s.io/client-go/testing"
+)
+
+// FakeNodeSlicePools implements NodeSlicePoolInterface
+type FakeNodeSlicePools struct {
+	Fake *FakeWhereaboutsV1alpha1
+	ns   string
+}
+
+var nodeslicepoolsResource = schema.GroupVersionResource{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1", Resource: "nodeslicepools"}
+
+var nodeslicepoolsKind = schema.GroupVersionKind{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1", Kind: "NodeSlicePool"}
+
+// Get takes name of the nodeSlicePool, and returns the corresponding nodeSlicePool object, and an error if there is any.
+func (c *FakeNodeSlicePools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeSlicePool, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewGetAction(nodeslicepoolsResource, c.ns, name), &v1alpha1.NodeSlicePool{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*v1alpha1.NodeSlicePool), err
+}
+
+// List takes label and field selectors, and returns the list of NodeSlicePools that match those selectors.
+func (c *FakeNodeSlicePools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodeSlicePoolList, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewListAction(nodeslicepoolsResource, nodeslicepoolsKind, c.ns, opts), &v1alpha1.NodeSlicePoolList{})
+
+	if obj == nil {
+		return nil, err
+	}
+
+	label, _, _ := testing.ExtractFromListOptions(opts)
+	if label == nil {
+		label = labels.Everything()
+	}
+	list := &v1alpha1.NodeSlicePoolList{ListMeta: obj.(*v1alpha1.NodeSlicePoolList).ListMeta}
+	for _, item := range obj.(*v1alpha1.NodeSlicePoolList).Items {
+		if label.Matches(labels.Set(item.Labels)) {
+			list.Items = append(list.Items, item)
+		}
+	}
+	return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested nodeSlicePools.
+func (c *FakeNodeSlicePools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+	return c.Fake.
+		InvokesWatch(testing.NewWatchAction(nodeslicepoolsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a nodeSlicePool and creates it.  Returns the server's representation of the nodeSlicePool, and an error, if there is any.
+func (c *FakeNodeSlicePools) Create(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.CreateOptions) (result *v1alpha1.NodeSlicePool, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewCreateAction(nodeslicepoolsResource, c.ns, nodeSlicePool), &v1alpha1.NodeSlicePool{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*v1alpha1.NodeSlicePool), err
+}
+
+// Update takes the representation of a nodeSlicePool and updates it. Returns the server's representation of the nodeSlicePool, and an error, if there is any.
+func (c *FakeNodeSlicePools) Update(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (result *v1alpha1.NodeSlicePool, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewUpdateAction(nodeslicepoolsResource, c.ns, nodeSlicePool), &v1alpha1.NodeSlicePool{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*v1alpha1.NodeSlicePool), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeNodeSlicePools) UpdateStatus(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (*v1alpha1.NodeSlicePool, error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewUpdateSubresourceAction(nodeslicepoolsResource, "status", c.ns, nodeSlicePool), &v1alpha1.NodeSlicePool{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*v1alpha1.NodeSlicePool), err
+}
+
+// Delete takes name of the nodeSlicePool and deletes it. Returns an error if one occurs.
+func (c *FakeNodeSlicePools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+	_, err := c.Fake.
+		Invokes(testing.NewDeleteActionWithOptions(nodeslicepoolsResource, c.ns, name, opts), &v1alpha1.NodeSlicePool{})
+
+	return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeNodeSlicePools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+	action := testing.NewDeleteCollectionAction(nodeslicepoolsResource, c.ns, listOpts)
+
+	_, err := c.Fake.Invokes(action, &v1alpha1.NodeSlicePoolList{})
+	return err
+}
+
+// Patch applies the patch and returns the patched nodeSlicePool.
+func (c *FakeNodeSlicePools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodeSlicePool, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewPatchSubresourceAction(nodeslicepoolsResource, c.ns, name, pt, data, subresources...), &v1alpha1.NodeSlicePool{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*v1alpha1.NodeSlicePool), err
+}
diff --git a/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/fake/fake_whereabouts.cni.cncf.io_client.go b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/fake/fake_whereabouts.cni.cncf.io_client.go
index dd1177fbe..b8aa1b141 100644
--- a/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/fake/fake_whereabouts.cni.cncf.io_client.go
+++ b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/fake/fake_whereabouts.cni.cncf.io_client.go
@@ -31,6 +31,10 @@ func (c *FakeWhereaboutsV1alpha1) IPPools(namespace string) v1alpha1.IPPoolInter
 	return &FakeIPPools{c, namespace}
 }
 
+func (c *FakeWhereaboutsV1alpha1) NodeSlicePools(namespace string) v1alpha1.NodeSlicePoolInterface {
+	return &FakeNodeSlicePools{c, namespace}
+}
+
 func (c *FakeWhereaboutsV1alpha1) OverlappingRangeIPReservations(namespace string) v1alpha1.OverlappingRangeIPReservationInterface {
 	return &FakeOverlappingRangeIPReservations{c, namespace}
 }
diff --git a/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/generated_expansion.go
index 529523cbb..bd49da3fb 100644
--- a/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/generated_expansion.go
+++ b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/generated_expansion.go
@@ -19,4 +19,6 @@ package v1alpha1
 
 type IPPoolExpansion interface{}
 
+type NodeSlicePoolExpansion interface{}
+
 type OverlappingRangeIPReservationExpansion interface{}
diff --git a/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
new file mode 100644
index 000000000..b099dd7f4
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2024 The Kubernetes Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	"time"
+
+	v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
+	scheme "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned/scheme"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	rest "k8s.io/client-go/rest"
+)
+
+// NodeSlicePoolsGetter has a method to return a NodeSlicePoolInterface.
+// A group's client should implement this interface.
+type NodeSlicePoolsGetter interface {
+	NodeSlicePools(namespace string) NodeSlicePoolInterface
+}
+
+// NodeSlicePoolInterface has methods to work with NodeSlicePool resources.
+type NodeSlicePoolInterface interface {
+	Create(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.CreateOptions) (*v1alpha1.NodeSlicePool, error)
+	Update(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (*v1alpha1.NodeSlicePool, error)
+	UpdateStatus(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (*v1alpha1.NodeSlicePool, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NodeSlicePool, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NodeSlicePoolList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodeSlicePool, err error)
+	NodeSlicePoolExpansion
+}
+
+// nodeSlicePools implements NodeSlicePoolInterface
+type nodeSlicePools struct {
+	client rest.Interface
+	ns     string
+}
+
+// newNodeSlicePools returns a NodeSlicePools
+func newNodeSlicePools(c *WhereaboutsV1alpha1Client, namespace string) *nodeSlicePools {
+	return &nodeSlicePools{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the nodeSlicePool, and returns the corresponding nodeSlicePool object, and an error if there is any.
+func (c *nodeSlicePools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeSlicePool, err error) {
+	result = &v1alpha1.NodeSlicePool{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of NodeSlicePools that match those selectors.
+func (c *nodeSlicePools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodeSlicePoolList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.NodeSlicePoolList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested nodeSlicePools.
+func (c *nodeSlicePools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch(ctx)
+}
+
+// Create takes the representation of a nodeSlicePool and creates it.  Returns the server's representation of the nodeSlicePool, and an error, if there is any.
+func (c *nodeSlicePools) Create(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.CreateOptions) (result *v1alpha1.NodeSlicePool, err error) {
+	result = &v1alpha1.NodeSlicePool{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(nodeSlicePool).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Update takes the representation of a nodeSlicePool and updates it. Returns the server's representation of the nodeSlicePool, and an error, if there is any.
+func (c *nodeSlicePools) Update(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (result *v1alpha1.NodeSlicePool, err error) {
+	result = &v1alpha1.NodeSlicePool{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		Name(nodeSlicePool.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(nodeSlicePool).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *nodeSlicePools) UpdateStatus(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (result *v1alpha1.NodeSlicePool, err error) {
+	result = &v1alpha1.NodeSlicePool{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		Name(nodeSlicePool.Name).
+		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(nodeSlicePool).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Delete takes name of the nodeSlicePool and deletes it. Returns an error if one occurs.
+func (c *nodeSlicePools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		Name(name).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *nodeSlicePools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+	var timeout time.Duration
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		VersionedParams(&listOpts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// Patch applies the patch and returns the patched nodeSlicePool.
+func (c *nodeSlicePools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodeSlicePool, err error) {
+	result = &v1alpha1.NodeSlicePool{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("nodeslicepools").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/whereabouts.cni.cncf.io_client.go b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/whereabouts.cni.cncf.io_client.go
index a4a261a45..c841c614e 100644
--- a/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/whereabouts.cni.cncf.io_client.go
+++ b/pkg/client/clientset/versioned/typed/whereabouts.cni.cncf.io/v1alpha1/whereabouts.cni.cncf.io_client.go
@@ -28,6 +28,7 @@ import (
 type WhereaboutsV1alpha1Interface interface {
 	RESTClient() rest.Interface
 	IPPoolsGetter
+	NodeSlicePoolsGetter
 	OverlappingRangeIPReservationsGetter
 }
 
@@ -40,6 +41,10 @@ func (c *WhereaboutsV1alpha1Client) IPPools(namespace string) IPPoolInterface {
 	return newIPPools(c, namespace)
 }
 
+func (c *WhereaboutsV1alpha1Client) NodeSlicePools(namespace string) NodeSlicePoolInterface {
+	return newNodeSlicePools(c, namespace)
+}
+
 func (c *WhereaboutsV1alpha1Client) OverlappingRangeIPReservations(namespace string) OverlappingRangeIPReservationInterface {
 	return newOverlappingRangeIPReservations(c, namespace)
 }
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index 24f0e1b1d..4d2c3dc0f 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -54,6 +54,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
 	// Group=whereabouts.cni.cncf.io, Version=v1alpha1
 	case v1alpha1.SchemeGroupVersion.WithResource("ippools"):
 		return &genericInformer{resource: resource.GroupResource(), informer: f.Whereabouts().V1alpha1().IPPools().Informer()}, nil
+	case v1alpha1.SchemeGroupVersion.WithResource("nodeslicepools"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Whereabouts().V1alpha1().NodeSlicePools().Informer()}, nil
 	case v1alpha1.SchemeGroupVersion.WithResource("overlappingrangeipreservations"):
 		return &genericInformer{resource: resource.GroupResource(), informer: f.Whereabouts().V1alpha1().OverlappingRangeIPReservations().Informer()}, nil
 
diff --git a/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1/interface.go b/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1/interface.go
index cd1e4cf02..e2546214d 100644
--- a/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1/interface.go
+++ b/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1/interface.go
@@ -25,6 +25,8 @@ import (
 type Interface interface {
 	// IPPools returns a IPPoolInformer.
 	IPPools() IPPoolInformer
+	// NodeSlicePools returns a NodeSlicePoolInformer.
+	NodeSlicePools() NodeSlicePoolInformer
 	// OverlappingRangeIPReservations returns a OverlappingRangeIPReservationInformer.
 	OverlappingRangeIPReservations() OverlappingRangeIPReservationInformer
 }
@@ -45,6 +47,11 @@ func (v *version) IPPools() IPPoolInformer {
 	return &iPPoolInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
 }
 
+// NodeSlicePools returns a NodeSlicePoolInformer.
+func (v *version) NodeSlicePools() NodeSlicePoolInformer {
+	return &nodeSlicePoolInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
 // OverlappingRangeIPReservations returns a OverlappingRangeIPReservationInformer.
 func (v *version) OverlappingRangeIPReservations() OverlappingRangeIPReservationInformer {
 	return &overlappingRangeIPReservationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
diff --git a/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go b/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
new file mode 100644
index 000000000..39ea336fa
--- /dev/null
+++ b/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2024 The Kubernetes Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+	time "time"
+
+	whereaboutscnicncfiov1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
+	versioned "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions/internalinterfaces"
+	v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	watch "k8s.io/apimachinery/pkg/watch"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// NodeSlicePoolInformer provides access to a shared informer and lister for
+// NodeSlicePools.
+type NodeSlicePoolInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() v1alpha1.NodeSlicePoolLister
+}
+
+type nodeSlicePoolInformer struct {
+	factory          internalinterfaces.SharedInformerFactory
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+	namespace        string
+}
+
+// NewNodeSlicePoolInformer constructs a new informer for NodeSlicePool type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewNodeSlicePoolInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredNodeSlicePoolInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredNodeSlicePoolInformer constructs a new informer for NodeSlicePool type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredNodeSlicePoolInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(
+		&cache.ListWatch{
+			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.WhereaboutsV1alpha1().NodeSlicePools(namespace).List(context.TODO(), options)
+			},
+			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.WhereaboutsV1alpha1().NodeSlicePools(namespace).Watch(context.TODO(), options)
+			},
+		},
+		&whereaboutscnicncfiov1alpha1.NodeSlicePool{},
+		resyncPeriod,
+		indexers,
+	)
+}
+
+func (f *nodeSlicePoolInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredNodeSlicePoolInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *nodeSlicePoolInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&whereaboutscnicncfiov1alpha1.NodeSlicePool{}, f.defaultInformer)
+}
+
+func (f *nodeSlicePoolInformer) Lister() v1alpha1.NodeSlicePoolLister {
+	return v1alpha1.NewNodeSlicePoolLister(f.Informer().GetIndexer())
+}
diff --git a/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1/expansion_generated.go b/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1/expansion_generated.go
index 8fcd90e06..6495d1a1a 100644
--- a/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1/expansion_generated.go
+++ b/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1/expansion_generated.go
@@ -25,6 +25,14 @@ type IPPoolListerExpansion interface{}
 // IPPoolNamespaceLister.
 type IPPoolNamespaceListerExpansion interface{}
 
+// NodeSlicePoolListerExpansion allows custom methods to be added to
+// NodeSlicePoolLister.
+type NodeSlicePoolListerExpansion interface{}
+
+// NodeSlicePoolNamespaceListerExpansion allows custom methods to be added to
+// NodeSlicePoolNamespaceLister.
+type NodeSlicePoolNamespaceListerExpansion interface{}
+
 // OverlappingRangeIPReservationListerExpansion allows custom methods to be added to
 // OverlappingRangeIPReservationLister.
 type OverlappingRangeIPReservationListerExpansion interface{}
diff --git a/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go b/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
new file mode 100644
index 000000000..1e6ae8bb5
--- /dev/null
+++ b/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1/nodeslicepool.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2024 The Kubernetes Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/client-go/tools/cache"
+)
+
+// NodeSlicePoolLister helps list NodeSlicePools.
+// All objects returned here must be treated as read-only.
+type NodeSlicePoolLister interface {
+	// List lists all NodeSlicePools in the indexer.
+	// Objects returned here must be treated as read-only.
+	List(selector labels.Selector) (ret []*v1alpha1.NodeSlicePool, err error)
+	// NodeSlicePools returns an object that can list and get NodeSlicePools.
+	NodeSlicePools(namespace string) NodeSlicePoolNamespaceLister
+	NodeSlicePoolListerExpansion
+}
+
+// nodeSlicePoolLister implements the NodeSlicePoolLister interface.
+type nodeSlicePoolLister struct {
+	indexer cache.Indexer
+}
+
+// NewNodeSlicePoolLister returns a new NodeSlicePoolLister.
+func NewNodeSlicePoolLister(indexer cache.Indexer) NodeSlicePoolLister {
+	return &nodeSlicePoolLister{indexer: indexer}
+}
+
+// List lists all NodeSlicePools in the indexer.
+func (s *nodeSlicePoolLister) List(selector labels.Selector) (ret []*v1alpha1.NodeSlicePool, err error) {
+	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+		ret = append(ret, m.(*v1alpha1.NodeSlicePool))
+	})
+	return ret, err
+}
+
+// NodeSlicePools returns an object that can list and get NodeSlicePools.
+func (s *nodeSlicePoolLister) NodeSlicePools(namespace string) NodeSlicePoolNamespaceLister {
+	return nodeSlicePoolNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// NodeSlicePoolNamespaceLister helps list and get NodeSlicePools.
+// All objects returned here must be treated as read-only.
+type NodeSlicePoolNamespaceLister interface {
+	// List lists all NodeSlicePools in the indexer for a given namespace.
+	// Objects returned here must be treated as read-only.
+	List(selector labels.Selector) (ret []*v1alpha1.NodeSlicePool, err error)
+	// Get retrieves the NodeSlicePool from the indexer for a given namespace and name.
+	// Objects returned here must be treated as read-only.
+	Get(name string) (*v1alpha1.NodeSlicePool, error)
+	NodeSlicePoolNamespaceListerExpansion
+}
+
+// nodeSlicePoolNamespaceLister implements the NodeSlicePoolNamespaceLister
+// interface.
+type nodeSlicePoolNamespaceLister struct {
+	indexer   cache.Indexer
+	namespace string
+}
+
+// List lists all NodeSlicePools in the indexer for a given namespace.
+func (s nodeSlicePoolNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.NodeSlicePool, err error) {
+	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+		ret = append(ret, m.(*v1alpha1.NodeSlicePool))
+	})
+	return ret, err
+}
+
+// Get retrieves the NodeSlicePool from the indexer for a given namespace and name.
+func (s nodeSlicePoolNamespaceLister) Get(name string) (*v1alpha1.NodeSlicePool, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(v1alpha1.Resource("nodeslicepool"), name)
+	}
+	return obj.(*v1alpha1.NodeSlicePool), nil
+}
diff --git a/pkg/config/config.go b/pkg/config/config.go
index ba94ce61c..0c1733d0a 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -90,6 +90,15 @@ func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (*
 		n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...)
 	}
 
+	// TODO: is this the best way to handle this range? It depends on how we want the IPAM plugin to be configured.
+	if n.IPAM.NodeSliceFastRange != "" {
+		sliceRange := types.RangeConfiguration{
+			Range: n.IPAM.NodeSliceFastRange,
+		}
+
+		n.IPAM.IPRanges = append([]types.RangeConfiguration{sliceRange}, n.IPAM.IPRanges...)
+	}
+
 	for idx := range n.IPAM.IPRanges {
 		if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2 {
 			firstip := netutils.ParseIPSloppy(r[0])
@@ -109,6 +118,8 @@ func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (*
 		} else {
 			firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range)
 			if err != nil {
+				logging.Debugf("invalid cidr error %v", n.IPAM.IPRanges[idx].Range)
+				logging.Debugf("full ranges %v", n.IPAM.IPRanges)
 				return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err)
 			}
 			n.IPAM.IPRanges[idx].Range = ipNet.String()
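The new branch prepends NodeSliceFastRange onto IPRanges the same way the legacy single Range is migrated just above it. A sketch of exercising it through LoadIPAMConfig — the "node_slice_fast_range" JSON key is an assumption, since the actual tag is defined in pkg/types/types.go and not shown in this excerpt, and other required whereabouts IPAM fields (e.g. the kubernetes config path) are omitted here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/k8snetworkplumbingwg/whereabouts/pkg/config"
)

func main() {
	conf := []byte(`{
		"cniVersion": "0.3.1",
		"name": "example-net",
		"type": "whereabouts",
		"ipam": {
			"type": "whereabouts",
			"node_slice_fast_range": "10.0.0.0/8"
		}
	}`)
	ipamConf, _, err := config.LoadIPAMConfig(conf, "")
	if err != nil {
		log.Fatal(err)
	}
	// The branch above prepends the node-slice range, so it lands at index 0.
	fmt.Println(ipamConf.IPRanges[0].Range)
}
```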
diff --git a/pkg/iphelpers/iphelpers.go b/pkg/iphelpers/iphelpers.go
index f70591cfd..d4567bf7b 100644
--- a/pkg/iphelpers/iphelpers.go
+++ b/pkg/iphelpers/iphelpers.go
@@ -1,9 +1,13 @@
 package iphelpers
 
 import (
+	"encoding/binary"
+	"errors"
 	"fmt"
 	"math"
 	"net"
+	"strconv"
+	"strings"
 )
 
 // CompareIPs reports whether out of 2 given IPs, ipX and ipY, ipY is smaller (-1), the same (0) or larger (1).
@@ -25,6 +29,61 @@ func CompareIPs(ipX net.IP, ipY net.IP) int {
 	return 0
 }
 
+// DivideRangeBySize takes an IP range in CIDR notation (e.g. 11.0.0.0/8) and a slice size (e.g. /24),
+// and returns the list of subnet CIDRs, as strings, that divide the input range into slices of that size.
+func DivideRangeBySize(inputNetwork string, sliceSizeString string) ([]string, error) {
+	// Remove "/" from the start of the sliceSize
+	if strings.HasPrefix(sliceSizeString, "/") {
+		sliceSizeString = sliceSizeString[1:]
+	}
+	sliceSize, err := strconv.Atoi(sliceSizeString)
+	if err != nil {
+		return nil, fmt.Errorf("invalid sliceSize %q: %w", sliceSizeString, err)
+	}
+	ip, ipNet, err := net.ParseCIDR(inputNetwork)
+	if err != nil {
+		return nil, err
+	}
+	if !ip.Equal(ipNet.IP) {
+		return nil, errors.New("netCIDR is not a valid network address")
+	}
+	netMaskSize, _ := ipNet.Mask.Size()
+	if netMaskSize > sliceSize {
+		return nil, errors.New("sliceSize must be greater than or equal to the network mask size")
+	}
+
+	totalSubnetsInNetwork := math.Pow(2, float64(sliceSize)-float64(netMaskSize))
+	totalHostsInSubnet := math.Pow(2, 32-float64(sliceSize))
+	subnetIntAddresses := make([]uint32, int(totalSubnetsInNetwork))
+	// first subnet address is same as the network address
+	subnetIntAddresses[0] = ip2int(ip.To4())
+	for i := 1; i < int(totalSubnetsInNetwork); i++ {
+		subnetIntAddresses[i] = subnetIntAddresses[i-1] + uint32(totalHostsInSubnet)
+	}
+
+	subnetCIDRs := make([]string, 0)
+	for _, sia := range subnetIntAddresses {
+		subnetCIDRs = append(
+			subnetCIDRs,
+			int2ip(sia).String()+"/"+strconv.Itoa(sliceSize),
+		)
+	}
+	return subnetCIDRs, nil
+}
+
+func ip2int(ip net.IP) uint32 {
+	if len(ip) == 16 {
+		panic("cannot convert IPv6 into uint32")
+	}
+	return binary.BigEndian.Uint32(ip)
+}
+func int2ip(nn uint32) net.IP {
+	ip := make(net.IP, 4)
+	binary.BigEndian.PutUint32(ip, nn)
+	return ip
+}
+
 // IsIPInRange returns true if a given IP is within the continuous range of start and end IP (inclusively).
 func IsIPInRange(in net.IP, start net.IP, end net.IP) (bool, error) {
 	if in == nil || start == nil || end == nil {
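A quick usage sketch of the new DivideRangeBySize helper, showing how a range is carved into equal slices:

```go
package main

import (
	"fmt"
	"log"

	"github.com/k8snetworkplumbingwg/whereabouts/pkg/iphelpers"
)

func main() {
	// Carve 10.0.0.0/8 into four /10 slices.
	subnets, err := iphelpers.DivideRangeBySize("10.0.0.0/8", "/10")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(subnets)
	// [10.0.0.0/10 10.64.0.0/10 10.128.0.0/10 10.192.0.0/10]
}
```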
diff --git a/pkg/node-controller/controller.go b/pkg/node-controller/controller.go
new file mode 100644
index 000000000..af2f34d86
--- /dev/null
+++ b/pkg/node-controller/controller.go
@@ -0,0 +1,676 @@
+package node_controller
+
+import (
+	"context"
+	"fmt"
+	cncfV1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
+	nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned"
+	nadinformers "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1"
+	nadlisters "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1"
+	"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
+	clientset "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
+	whereaboutsInformers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1"
+	whereaboutsListers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1"
+	"github.com/k8snetworkplumbingwg/whereabouts/pkg/config"
+	"github.com/k8snetworkplumbingwg/whereabouts/pkg/iphelpers"
+	"github.com/k8snetworkplumbingwg/whereabouts/pkg/types"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"time"
+
+	"golang.org/x/time/rate"
+	"k8s.io/apimachinery/pkg/labels"
+
+	corev1 "k8s.io/api/core/v1"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	coreinformers "k8s.io/client-go/informers/core/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	corelisters "k8s.io/client-go/listers/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+)
+
+const controllerAgentName = "node-controller"
+
+const (
+	whereaboutsConfigPath = "/etc/cni/net.d/whereabouts.d/whereabouts.conf"
+)
+
+const (
+	// SuccessSynced is used as part of the Event 'reason' when a NodeSlicePool is synced
+	SuccessSynced = "Synced"
+	// ErrResourceExists is used as part of the Event 'reason' when a NodeSlicePool fails
+	// to sync due to a resource of the same name already existing.
+	ErrResourceExists = "ErrResourceExists"
+
+	// MessageResourceExists is the message used for Events when a resource
+	// fails to sync due to another resource of the same name already existing
+	MessageResourceExists = "Resource %q already exists and is not managed by the node-slice controller"
+	// MessageResourceSynced is the message used for an Event fired when a NodeSlicePool
+	// is synced successfully
+	MessageResourceSynced = "NodeSlicePool synced successfully"
+)
+
+// Controller is the controller implementation for NodeSlicePool resources
+type Controller struct {
+	// kubeclientset is a standard kubernetes clientset
+	kubeclientset kubernetes.Interface
+	// whereaboutsclientset is a clientset for our own API group
+	whereaboutsclientset clientset.Interface
+
+	nadclientset nadclient.Interface
+
+	nodeLister       corelisters.NodeLister
+	nodeInformer     coreinformers.NodeInformer
+	nodesSynced      cache.InformerSynced
+	ipPoolLister     whereaboutsListers.IPPoolLister
+	ipPoolInformer   whereaboutsInformers.IPPoolInformer
+	nodeIPPoolSynced cache.InformerSynced
+
+	nodeSlicePoolLister   whereaboutsListers.NodeSlicePoolLister
+	nodeSlicePoolInformer whereaboutsInformers.NodeSlicePoolInformer
+	nodeSlicePoolSynced   cache.InformerSynced
+
+	nadInformer nadinformers.NetworkAttachmentDefinitionInformer
+	nadLister   nadlisters.NetworkAttachmentDefinitionLister
+	nadSynced   cache.InformerSynced
+
+	// workqueue is a rate limited work queue. This is used to queue work to be
+	// processed instead of performing it as soon as a change happens. This
+	// means we can ensure we only process a fixed amount of resources at a
+	// time, and makes it easy to ensure we are never processing the same item
+	// simultaneously in two different workers.
+	workqueue workqueue.RateLimitingInterface
+
+	// recorder is an event recorder for recording Event resources to the
+	// Kubernetes API.
+	recorder record.EventRecorder
+}
+
+//TODO: handle case where node is deleted and resource slice is still there
+//TODO: event broadcaster and recorder
+
+// NewController returns a new node-slice controller
+func NewController(
+	ctx context.Context,
+	kubeclientset kubernetes.Interface,
+	whereaboutsclientset clientset.Interface,
+	nadclientset nadclient.Interface,
+	nodeInformer coreinformers.NodeInformer,
+	nodeSlicePoolInformer whereaboutsInformers.NodeSlicePoolInformer,
+	nodeIPPoolInformer whereaboutsInformers.IPPoolInformer,
+	nadInformer nadinformers.NetworkAttachmentDefinitionInformer,
+) *Controller {
+	logger := klog.FromContext(ctx)
+
+	logger.V(4).Info("Creating event broadcaster")
+
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartStructuredLogging(0)
+	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
+	ratelimiter := workqueue.NewMaxOfRateLimiter(
+		workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
+		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(50), 300)},
+	)
+
+	c := &Controller{
+		kubeclientset:         kubeclientset,
+		nodeLister:            nodeInformer.Lister(),
+		nodeInformer:          nodeInformer,
+		nodesSynced:           nodeInformer.Informer().HasSynced,
+		whereaboutsclientset:  whereaboutsclientset,
+		nodeSlicePoolLister:   nodeSlicePoolInformer.Lister(),
+		nodeSlicePoolInformer: nodeSlicePoolInformer,
+		nodeSlicePoolSynced:   nodeSlicePoolInformer.Informer().HasSynced,
+		ipPoolLister:          nodeIPPoolInformer.Lister(),
+		ipPoolInformer:        nodeIPPoolInformer,
+		nodeIPPoolSynced:      nodeIPPoolInformer.Informer().HasSynced,
+		nadclientset:          nadclientset,
+		nadInformer:           nadInformer,
+		nadLister:             nadInformer.Lister(),
+		nadSynced:             nadInformer.Informer().HasSynced,
+		workqueue:             workqueue.NewRateLimitingQueue(ratelimiter),
+		recorder:              recorder,
+	}
+
+	logger.Info("Setting up event handlers")
+
+	nadInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc: c.onNadUpdate,
+		UpdateFunc: func(old, cur interface{}) {
+			c.onNadUpdate(cur)
+		},
+		DeleteFunc: c.onNadDelete,
+	})
+
+	//nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+	//	AddFunc: c.onNodeChange,
+	//	UpdateFunc: func(old, cur interface{}) {
+	//		c.onNodeChange(cur)
+	//	},
+	//	DeleteFunc: c.onNodeChange,
+	//})
+
+	// TODO: handle reconciliation of node slice changes
+	//nodeSlicePoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+	//	AddFunc: c.onNodeSliceChange,
+	//	UpdateFunc: func(old, cur interface{}) {
+	//		c.onNodeSliceChange(cur)
+	//	},
+	//	DeleteFunc: c.onNodeSliceChange,
+	//})
+
+	return c
+}
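+
+// For reference, a minimal wiring sketch (the method names on the generated
+// informer factories are assumed here and may differ):
+//
+//	kubeInformers := informers.NewSharedInformerFactory(kubeClient, 0)
+//	wbInformers := externalversions.NewSharedInformerFactory(wbClient, 0)
+//	nadInformers := nadinformers.NewSharedInformerFactory(nadClient, 0)
+//	c := NewController(ctx, kubeClient, wbClient, nadClient,
+//		kubeInformers.Core().V1().Nodes(),
+//		wbInformers.Whereabouts().V1alpha1().NodeSlicePools(),
+//		wbInformers.Whereabouts().V1alpha1().IPPools(),
+//		nadInformers.K8sCniCncfIo().V1().NetworkAttachmentDefinitions())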
+
+// onNadUpdate queues the NetworkAttachmentDefinition for processing.
+func (c *Controller) onNadUpdate(obj interface{}) {
+	klog.Info("onNadUpdate")
+	key, err := cache.MetaNamespaceKeyFunc(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
+		return
+	}
+	c.workqueue.Add(key)
+}
+
+// onNadDelete queues the key of the deleted NetworkAttachmentDefinition for processing.
+func (c *Controller) onNadDelete(obj interface{}) {
+	klog.Info("onNadDelete")
+	key, err := cache.MetaNamespaceKeyFunc(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
+		return
+	}
+
+	c.workqueue.Add(key)
+}
+
+// TODO (ivelichkovich): this assumes all nodes should be part of the node slices.
+// We may want to require nodes to have an annotation (similar to pods) to receive
+// a slice, in which case we would look up the applicable NADs for the node rather
+// than requeuing all of them. The same applies to the other node event handlers.
+func (c *Controller) onNodeChange(obj interface{}) {
+	nadlist, err := c.nadLister.List(labels.Everything())
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("couldn't get network-attachment-definition list from informer: %v", err))
+		return
+	}
+	for _, nad := range nadlist {
+		key, err := cache.MetaNamespaceKeyFunc(nad)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", nad, err))
+			return
+		}
+		//TODO: maybe we want to use AddAfter and have a batch period
+		c.workqueue.Add(key)
+	}
+}
+
+//func (c *Controller) onNodeSliceChange(obj interface{}) {
+//	nadlist, err := c.nadLister.List(labels.Everything())
+//	if err != nil {
+//		utilruntime.HandleError(fmt.Errorf("couldn't get network-attachment-definition list from informer: %v", err))
+//		return
+//	}
+//	for _, nad := range nadlist {
+//		key, err := cache.MetaNamespaceKeyFunc(nad)
+//		if err != nil {
+//			utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", nad, err))
+//			return
+//		}
+//		//TODO: maybe we want to use AddAfter and have a batch period
+//		c.workqueue.Add(key)
+//	}
+//}
+
+// Run will set up the event handlers for types we are interested in, as well
+// as syncing informer caches and starting workers. It will block until the
+// context is cancelled, at which point it will shut down the workqueue and
+// wait for workers to finish processing their current work items.
+func (c *Controller) Run(ctx context.Context, workers int) error {
+	defer utilruntime.HandleCrash()
+	defer c.workqueue.ShutDown()
+	logger := klog.FromContext(ctx)
+
+	// Start the informer factories to begin populating the informer caches
+	logger.Info("Starting node-slice controller")
+
+	// Wait for the caches to be synced before starting workers
+	logger.Info("Waiting for informer caches to sync")
+
+	if ok := cache.WaitForCacheSync(ctx.Done(), c.nodesSynced, c.nodeSlicePoolSynced, c.nodeIPPoolSynced, c.nadSynced); !ok {
+		return fmt.Errorf("failed to wait for informer caches to sync")
+	}
+
+	logger.Info("Starting workers", "count", workers)
+	// Launch workers to process NetworkAttachmentDefinition resources
+	for i := 0; i < workers; i++ {
+		go wait.UntilWithContext(ctx, c.runWorker, time.Second)
+	}
+
+	logger.Info("Started workers")
+	<-ctx.Done()
+	logger.Info("Shutting down workers")
+
+	return nil
+}
+
+// runWorker is a long-running function that will continually call the
+// processNextWorkItem function in order to read and process a message on the
+// workqueue.
+func (c *Controller) runWorker(ctx context.Context) {
+	for c.processNextWorkItem(ctx) {
+	}
+}
+
+// processNextWorkItem will read a single work item off the workqueue and
+// attempt to process it, by calling the syncHandler.
+func (c *Controller) processNextWorkItem(ctx context.Context) bool {
+	obj, shutdown := c.workqueue.Get()
+	logger := klog.FromContext(ctx)
+
+	if shutdown {
+		return false
+	}
+
+	// We wrap this block in a func so we can defer c.workqueue.Done.
+	err := func(obj interface{}) error {
+		// We call Done here so the workqueue knows we have finished
+		// processing this item. We also must remember to call Forget if we
+		// do not want this work item being re-queued. For example, we do
+		// not call Forget if a transient error occurs, instead the item is
+		// put back on the workqueue and attempted again after a back-off
+		// period.
+		defer c.workqueue.Done(obj)
+		var key string
+		var ok bool
+		// We expect strings to come off the workqueue. These are of the
+		// form namespace/name. We do this as the delayed nature of the
+		// workqueue means the items in the informer cache may actually be
+		// more up to date than when the item was initially put onto the
+		// workqueue.
+		if key, ok = obj.(string); !ok {
+			// As the item in the workqueue is actually invalid, we call
+			// Forget here else we'd go into a loop of attempting to
+			// process a work item that is invalid.
+			c.workqueue.Forget(obj)
+			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
+			return nil
+		}
+		// Run the syncHandler, passing it the namespace/name string of the
+		// NetworkAttachmentDefinition to be synced.
+		if err := c.syncHandler(ctx, key); err != nil {
+			// Put the item back on the workqueue to handle any transient errors.
+			c.workqueue.AddRateLimited(key)
+			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
+		}
+		// Finally, if no error occurs we Forget this item so it does not
+		// get queued again until another change happens.
+		c.workqueue.Forget(obj)
+		logger.Info("Successfully synced", "resourceName", key)
+		return nil
+	}(obj)
+
+	if err != nil {
+		utilruntime.HandleError(err)
+		return true
+	}
+
+	return true
+}
+
+// syncHandler compares the actual state with the desired state and attempts
+// to converge the two, creating or updating the NodeSlicePool that backs the
+// NetworkAttachmentDefinition.
+func (c *Controller) syncHandler(ctx context.Context, key string) error {
+	// Convert the namespace/name string into a distinct namespace and name
+	logger := klog.LoggerWithValues(klog.FromContext(ctx), "resourceName", key)
+
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
+		return nil
+	}
+
+	nad, err := c.nadLister.NetworkAttachmentDefinitions(namespace).Get(name)
+	if err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+		// The NAD no longer exists, so it must have been deleted; clean up its
+		// NodeSlicePools. If the controller is down during the delete, the
+		// event could be missed (similar to endpoints, see kubernetes #6877).
+		//TODO: do we want to clean up IP Pools as well?
+		//err = c.whereaboutsclientset.WhereaboutsV1alpha1().NodeSlicePools(namespace).Delete(ctx, name, metav1.DeleteOptions{})
+		//if err != nil && !errors.IsNotFound(err) {
+		//	return err
+		//}
+		//
+		//iPPools, err := c.ipPoolLister.IPPools(namespace).List(nil)
+		//if err != nil {
+		//	return err
+		//}
+		//for _, slicepool := range iPPools {
+		//	if ownerRef := metav1.GetControllerOf(slicepool); ownerRef != nil {
+		//		// If this object is not owned by a NodeSlicePool, we should not do anything more
+		//		// with it.
+		//		if ownerRef.Kind != "NodeSlicePool" || ownerRef.Name == name {
+		//			continue
+		//		}
+		//
+		//		err = c.whereaboutsclientset.WhereaboutsV1alpha1().IPPools(namespace).Delete(ctx, name, metav1.DeleteOptions{})
+		//		if err != nil {
+		//			if !errors.IsNotFound(err) {
+		//				return err
+		//			}
+		//		}
+		//	}
+		//}
+		return nil
+	}
+	// The NAD does exist; check whether node_slice_range or slice_size changed.
+	//TODO: observed "error syncing 'default/igor-1': config file not found, requeuing";
+	// can we make this work without a local config file?
+	ipamConf, err := ipamConfiguration(nad, "")
+	if err != nil {
+		return err
+	}
+
+	// This is to support several NADs and interfaces on the same network
+	//TODO: validation that nad configs are compatible between different nads with same network-name
+	sliceName := name
+	if ipamConf.NetworkName != "" {
+		sliceName = ipamConf.NetworkName
+	}
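+	// e.g. two NADs that both set the same network name resolve to a single
+	// shared NodeSlicePool named after the network, rather than one per NAD.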
+
+	logger.Info(fmt.Sprintf("%v", ipamConf))
+	if ipamConf.Range != "" {
+		logger.Info("skipping update node slices for network-attachment-definition due to range being set",
+			"network-attachment-definition", klog.KRef(namespace, name))
+		return nil
+	}
+	logger.Info(fmt.Sprintf("slicesize: %v", ipamConf.NodeSliceSize))
+	if ipamConf.NodeSliceFastRange == "" || ipamConf.NodeSliceSize == "" {
+		logger.Info("skipping update node slices for network-attachment-definition due missing node slice configurations",
+			"network-attachment-definition", klog.KRef(namespace, name))
+		return nil
+	}
+
+	logger.Info("About to update node slices for network-attachment-definition",
+		"network-attachment-definition", klog.KRef(namespace, name))
+
+	currentNodeSlicePool, err := c.nodeSlicePoolLister.NodeSlicePools(namespace).Get(sliceName)
+	if err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+		logger.Info("node slice pool does not exist, creating")
+		nodeslice := &v1alpha1.NodeSlicePool{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "NodeSlicePool",
+				APIVersion: "whereabouts.cni.cncf.io/v1alpha1",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      sliceName,
+				Namespace: namespace,
+				// TODO: owner ref, if multiple NADs are for shared network and result in one NodeSlicePool
+				// can we just list each of them as owners?
+				//OwnerReferences: []metav1.OwnerReference{
+				//	*metav1.NewControllerRef(nad, cncfV1.SchemeGroupVersion.WithKind("NetworkAttachmentDefinition")),
+				//},
+			},
+			Spec: v1alpha1.NodeSlicePoolSpec{
+				Range:     ipamConf.NodeSliceFastRange,
+				SliceSize: ipamConf.NodeSliceSize,
+			},
+		}
+		allocations := []v1alpha1.NodeSliceAllocation{}
+		logger.V(4).Info("creating node slice", "nodeSlicePool", nodeslice)
+		//TODO: handle the case when the range is full
+		subnets, err := iphelpers.DivideRangeBySize(nodeslice.Spec.Range, ipamConf.NodeSliceSize)
+		if err != nil {
+			return err
+		}
+		logger.V(4).Info("computed subnets", "subnets", subnets)
+		for _, subnet := range subnets {
+			allocations = append(allocations, v1alpha1.NodeSliceAllocation{
+				SliceRange: subnet,
+			})
+		}
+		nodes, err := c.nodeLister.List(labels.Everything())
+		if err != nil {
+			return err
+		}
+		for _, node := range nodes {
+			logger.Info(fmt.Sprintf("assigning node to slice: %v\n", node.Name))
+			assignNodeToSlice(allocations, node.Name)
+		}
+		nodeslice.Status = v1alpha1.NodeSlicePoolStatus{
+			Allocations: allocations,
+		}
+		logger.Info(fmt.Sprintf("final allocations: %v\n", allocations))
+		_, err = c.whereaboutsclientset.WhereaboutsV1alpha1().NodeSlicePools(namespace).Create(ctx, nodeslice, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+	} else {
+		// node slice currently exists
+		if currentNodeSlicePool.Spec.SliceSize != ipamConf.NodeSliceSize ||
+			currentNodeSlicePool.Spec.Range != ipamConf.NodeSliceFastRange {
+			// slices have changed so redo the slicing
+			subnets, err := iphelpers.DivideRangeBySize(ipamConf.NodeSliceFastRange, ipamConf.NodeSliceSize)
+			if err != nil {
+				return err
+			}
+			nodeslice := currentNodeSlicePool.DeepCopy()
+			allocations := []v1alpha1.NodeSliceAllocation{}
+			for _, subnet := range subnets {
+				allocations = append(allocations, v1alpha1.NodeSliceAllocation{
+					SliceRange: subnet,
+				})
+			}
+			nodes, err := c.nodeLister.List(labels.Everything())
+			if err != nil {
+				return err
+			}
+			for _, node := range nodes {
+				assignNodeToSlice(allocations, node.Name)
+			}
+			//TODO: best way to update status
+			nodeslice.Status = v1alpha1.NodeSlicePoolStatus{
+				Allocations: allocations,
+			}
+			_, err = c.whereaboutsclientset.WhereaboutsV1alpha1().NodeSlicePools(namespace).Update(ctx, nodeslice, metav1.UpdateOptions{})
+			if err != nil {
+				return err
+			}
+		} else {
+			// slices have not changed, so only make sure all nodes are assigned
+			nodeslice := currentNodeSlicePool.DeepCopy()
+			allocations := currentNodeSlicePool.Status.Allocations
+			nodes, err := c.nodeLister.List(labels.Everything())
+			if err != nil {
+				return err
+			}
+			for _, node := range nodes {
+				assignNodeToSlice(allocations, node.Name)
+			}
+			nodeslice.Status.Allocations = allocations
+			_, err = c.whereaboutsclientset.WhereaboutsV1alpha1().NodeSlicePools(namespace).Update(ctx, nodeslice, metav1.UpdateOptions{})
+			if err != nil {
+				logger.Error(err, "failed to update NodeSlicePool")
+				return err
+			}
+			}
+		}
+	}
+
+	//TODO: record events
+	//c.recorder.Event(nad, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced)
+	return nil
+}
+
+// TODO: this shouldn't depend on a local config file, only the NAD
+func ipamConfiguration(nad *cncfV1.NetworkAttachmentDefinition, mountPath string) (*types.IPAMConfig, error) {
+	mounterWhereaboutsConfigFilePath := mountPath + whereaboutsConfigPath
+
+	ipamConfig, err := config.LoadIPAMConfiguration([]byte(nad.Spec.Config), "", mounterWhereaboutsConfigFilePath)
+	if err != nil {
+		return nil, err
+	}
+	return ipamConfig, nil
+}
+
+// assignNodeToSlice assigns nodeName to the first unassigned slice range.
+// Writes to allocations[i] go through the shared backing array, so they are
+// visible to the caller's slice.
+func assignNodeToSlice(allocations []v1alpha1.NodeSliceAllocation, nodeName string) {
+	if nodeHasAllocation(allocations, nodeName) {
+		return
+	}
+	for i, allocation := range allocations {
+		if allocation.NodeName == "" {
+			allocations[i] = v1alpha1.NodeSliceAllocation{
+				SliceRange: allocation.SliceRange,
+				NodeName:   nodeName,
+			}
+			return
+		}
+	}
+}
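+
+// For example, given allocations [{SliceRange: "10.0.0.0/10"}, {SliceRange:
+// "10.64.0.0/10", NodeName: "worker-1"}], assignNodeToSlice(allocations,
+// "worker-2") fills the first entry, leaving "worker-2" on "10.0.0.0/10".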
+
+func nodeHasAllocation(allocations []v1alpha1.NodeSliceAllocation, nodeName string) bool {
+	for _, allocation := range allocations {
+		if allocation.NodeName == nodeName {
+			return true
+		}
+	}
+	return false
+}
+
+// TODO: move to util package
+// TODO: unimplemented stub; presumably this should collect the slice ranges
+// from every NodeSlicePool except excludeSlice.
+func getIPRangesFromNodeSlices(slices []*v1alpha1.NodeSlicePool, excludeSlice string) []string {
+	return []string{}
+}
+
+// enqueueNode takes a node resource and converts it into a namespace/name
+// string which is then put onto the work queue. This method should *not* be
+// passed resources of any type other than Node.
+func (c *Controller) enqueueNode(obj interface{}) {
+	var key string
+	var err error
+	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
+		utilruntime.HandleError(err)
+		return
+	}
+	c.workqueue.Add(key)
+}
diff --git a/pkg/node-controller/signals/signals.go b/pkg/node-controller/signals/signals.go
new file mode 100644
index 000000000..8991d6a62
--- /dev/null
+++ b/pkg/node-controller/signals/signals.go
@@ -0,0 +1,28 @@
+package signals
+
+import (
+	"context"
+	"os"
+	"os/signal"
+)
+
+var onlyOneSignalHandler = make(chan struct{})
+
+// SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned
+// which is cancelled when one of these signals is caught. If a second signal
+// is caught, the program terminates with exit code 1.
+func SetupSignalHandler() context.Context {
+	close(onlyOneSignalHandler) // panics when called twice
+
+	c := make(chan os.Signal, 2)
+	ctx, cancel := context.WithCancel(context.Background())
+	signal.Notify(c, shutdownSignals...)
+	go func() {
+		<-c
+		cancel()
+		<-c
+		os.Exit(1) // second signal. Exit directly.
+	}()
+
+	return ctx
+}
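+
+// A typical caller looks roughly like this (a sketch; the worker count is
+// arbitrary):
+//
+//	ctx := signals.SetupSignalHandler()
+//	if err := controller.Run(ctx, 2); err != nil {
+//		klog.Fatalf("error running controller: %s", err.Error())
+//	}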
diff --git a/pkg/node-controller/signals/signals_posix.go b/pkg/node-controller/signals/signals_posix.go
new file mode 100644
index 000000000..2519e917a
--- /dev/null
+++ b/pkg/node-controller/signals/signals_posix.go
@@ -0,0 +1,8 @@
+package signals
+
+import (
+	"os"
+	"syscall"
+)
+
+var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
diff --git a/pkg/storage/kubernetes/ipam.go b/pkg/storage/kubernetes/ipam.go
index 299c3ee58..f6d14e618 100644
--- a/pkg/storage/kubernetes/ipam.go
+++ b/pkg/storage/kubernetes/ipam.go
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"net"
+	"os"
 	"strconv"
 	"strings"
 	"sync"
@@ -80,6 +81,7 @@ const UnnamedNetwork string = ""
 type PoolIdentifier struct {
 	IpRange     string
 	NetworkName string
+	NodeName    string
 }
 
 func toIPReservationList(allocations map[string]whereaboutsv1alpha1.IPAllocation, firstip net.IP) []whereaboutstypes.IPReservation {
@@ -128,10 +130,20 @@ func (i *KubernetesIPAM) GetIPPool(ctx context.Context, poolIdentifier PoolIdent
 }
 
 func IPPoolName(poolIdentifier PoolIdentifier) string {
-	if poolIdentifier.NetworkName == UnnamedNetwork {
-		return normalizeRange(poolIdentifier.IpRange)
+	if poolIdentifier.NodeName != "" {
+		// fast node range naming convention
+		if poolIdentifier.NetworkName == UnnamedNetwork {
+			return fmt.Sprintf("%v-%v", normalizeRange(poolIdentifier.IpRange), poolIdentifier.NodeName)
+		} else {
+			return fmt.Sprintf("%v-%v", poolIdentifier.NetworkName, poolIdentifier.NodeName)
+		}
 	} else {
-		return fmt.Sprintf("%s-%s", poolIdentifier.NetworkName, normalizeRange(poolIdentifier.IpRange))
+		// default naming convention
+		if poolIdentifier.NetworkName == UnnamedNetwork {
+			return normalizeRange(poolIdentifier.IpRange)
+		} else {
+			return fmt.Sprintf("%s-%s", poolIdentifier.NetworkName, normalizeRange(poolIdentifier.IpRange))
+		}
 	}
 }
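+
+// Illustrative pool names, assuming normalizeRange replaces "/" with "-":
+//
+//	{IpRange: "10.0.0.0/8"}                          -> "10.0.0.0-8"
+//	{IpRange: "10.0.0.0/8", NetworkName: "storage"}  -> "storage-10.0.0.0-8"
+//	{NetworkName: "storage", NodeName: "worker-1"}   -> "storage-worker-1"
+//	{IpRange: "10.0.0.0/8", NodeName: "worker-1"}    -> "10.0.0.0-8-worker-1"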
 
@@ -343,9 +355,18 @@ func (p *KubernetesIPPool) Update(ctx context.Context, reservations []whereabout
 	return nil
 }
 
+// TODO: the node name may not be available on the host; we could write it to
+// a file during CNI install.
+func getNodeName() string {
+	envName := os.Getenv("NODENAME")
+	if envName != "" {
+		return envName
+	}
+	// Provisional fallback until the node name is plumbed through reliably.
+	hostname, err := os.Hostname()
+	if err != nil {
+		return ""
+	}
+	return hostname
+}
+
 // newLeaderElector creates a new leaderelection.LeaderElector and associated
 // channels by which to observe elections and depositions.
-func newLeaderElector(clientset kubernetes.Interface, namespace string, podNamespace string, podID string, leaseDuration int, renewDeadline int, retryPeriod int) (*leaderelection.LeaderElector, chan struct{}, chan struct{}) {
+func newLeaderElector(clientset kubernetes.Interface, namespace string, ipamConf whereaboutstypes.IPAMConfig) (*leaderelection.LeaderElector, chan struct{}, chan struct{}) {
 	//log.WithField("context", "leaderelection")
 	// leaderOK will block gRPC startup until it's closed.
 	leaderOK := make(chan struct{})
@@ -353,14 +374,21 @@ func newLeaderElector(clientset kubernetes.Interface, namespace string, podNames
 	// we are deposed as leader so that we can clean up.
 	deposed := make(chan struct{})
 
+	leaseName := "whereabouts"
+	//TODO: handle the case where the NODENAME env var is not set
+	if ipamConf.NodeSliceFastRange != "" && ipamConf.NodeSliceSize != "" {
+		// we lock per IP Pool so just use the pool name for the lease name
+		leaseName = IPPoolName(PoolIdentifier{NodeName: getNodeName(), NetworkName: ipamConf.NetworkName})
+	}
+
 	var rl = &resourcelock.LeaseLock{
 		LeaseMeta: metav1.ObjectMeta{
-			Name:      "whereabouts",
+			Name:      leaseName,
 			Namespace: namespace,
 		},
 		Client: clientset.CoordinationV1(),
 		LockConfig: resourcelock.ResourceLockConfig{
-			Identity: fmt.Sprintf("%s/%s", podNamespace, podID),
+			Identity: fmt.Sprintf("%s/%s", ipamConf.PodNamespace, ipamConf.PodName),
 		},
 	}
 
@@ -368,9 +396,9 @@ func newLeaderElector(clientset kubernetes.Interface, namespace string, podNames
 	// !bang
 	le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
 		Lock:            rl,
-		LeaseDuration:   time.Duration(leaseDuration) * time.Millisecond,
-		RenewDeadline:   time.Duration(renewDeadline) * time.Millisecond,
-		RetryPeriod:     time.Duration(retryPeriod) * time.Millisecond,
+		LeaseDuration:   time.Duration(ipamConf.LeaderLeaseDuration) * time.Millisecond,
+		RenewDeadline:   time.Duration(ipamConf.LeaderRenewDeadline) * time.Millisecond,
+		RetryPeriod:     time.Duration(ipamConf.LeaderRetryPeriod) * time.Millisecond,
 		ReleaseOnCancel: true,
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: func(_ context.Context) {
@@ -401,7 +429,7 @@ func IPManagement(ctx context.Context, mode int, ipamConf whereaboutstypes.IPAMC
 	}
 
 	// setup leader election
-	le, leader, deposed := newLeaderElector(client.clientSet, client.namespace, ipamConf.PodNamespace, ipamConf.PodName, ipamConf.LeaderLeaseDuration, ipamConf.LeaderRenewDeadline, ipamConf.LeaderRetryPeriod)
+	le, leader, deposed := newLeaderElector(client.clientSet, client.namespace, ipamConf)
 	var wg sync.WaitGroup
 	wg.Add(2)
 
@@ -455,11 +483,36 @@ func IPManagement(ctx context.Context, mode int, ipamConf whereaboutstypes.IPAMC
 	return newips, err
 }
 
+func getNodeSlicePoolRange(ctx context.Context, ipam *KubernetesIPAM) (string, error) {
+	//TODO: take the namespace from configuration instead of hardcoding "default"
+	logging.Debugf("ipam namespace is %v", ipam.namespace)
+	nodeSlice, err := ipam.client.WhereaboutsV1alpha1().NodeSlicePools("default").Get(ctx, getNodeSliceName(ipam), metav1.GetOptions{})
+	if err != nil {
+		logging.Errorf("error getting node slice %v", err)
+		return "", err
+	}
+	nodeName := getNodeName()
+	for _, allocation := range nodeSlice.Status.Allocations {
+		//TODO: NODENAME may not be set when this runs on the host rather than in the controller
+		if allocation.NodeName == nodeName {
+			return allocation.SliceRange, nil
+		}
+	}
+	logging.Errorf("node %q not found in node slice allocations", nodeName)
+	return "", fmt.Errorf("no allocated node slice for node %q", nodeName)
+}
+
+func getNodeSliceName(ipam *KubernetesIPAM) string {
+	if ipam.Config.NetworkName == UnnamedNetwork {
+		return ipam.Config.Name
+	}
+	return ipam.Config.NetworkName
+}
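+
+// For example, assuming ipam.Config.Name carries the CNI network name: an
+// unnamed network "mynet" resolves to NodeSlicePool "mynet", while an
+// explicit network_name "storage" resolves to "storage".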
+
 // IPManagementKubernetesUpdate manages k8s updates
 func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *KubernetesIPAM, ipamConf whereaboutstypes.IPAMConfig,
 	containerID string, podRef string) ([]net.IPNet, error) {
 	logging.Debugf("IPManagement -- mode: %v / containerID: %v / podRef: %v", mode, containerID, podRef)
 
 	var newips []net.IPNet
 	var newip net.IPNet
 	// Skip invalid modes
@@ -481,6 +534,7 @@ func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *Kubernete
 		logging.Errorf("IPAM connectivity error: %v", err)
 		return newips, err
 	}
+	logging.Debugf("test2")
 
 	// handle the ip add/del until successful
 	var overlappingrangeallocations []whereaboutstypes.IPReservation
@@ -494,14 +548,25 @@ func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *Kubernete
 			default:
 				// retry the IPAM loop if the context has not been cancelled
 			}
 
 			overlappingrangestore, err = ipam.GetOverlappingRangeStore()
 			if err != nil {
 				logging.Errorf("IPAM error getting OverlappingRangeStore: %v", err)
 				return newips, err
 			}
-
-			pool, err = ipam.GetIPPool(requestCtx, PoolIdentifier{IpRange: ipRange.Range, NetworkName: ipamConf.NetworkName})
+			poolIdentifier := PoolIdentifier{IpRange: ipRange.Range, NetworkName: ipamConf.NetworkName}
+			if ipamConf.NodeSliceFastRange != "" {
+				logging.Debugf("test4")
+				//TODO: validate that the NODENAME env var is set
+				poolIdentifier.NodeName = getNodeName()
+				nodeSliceRange, err := getNodeSlicePoolRange(ctx, ipam)
+				if err != nil {
+					return newips, err
+				}
+				poolIdentifier.IpRange = nodeSliceRange
+			}
+			logging.Debugf("using pool identifier: %v", poolIdentifier)
+			pool, err = ipam.GetIPPool(requestCtx, poolIdentifier)
 			if err != nil {
 				logging.Errorf("IPAM error reading pool allocations (attempt: %d): %v", j, err)
 				if e, ok := err.(storage.Temporary); ok && e.Temporary() {
diff --git a/pkg/types/types.go b/pkg/types/types.go
index 519a094a5..9d278ce1b 100644
--- a/pkg/types/types.go
+++ b/pkg/types/types.go
@@ -54,6 +54,8 @@ type IPAMConfig struct {
 	OmitRanges               []string             `json:"exclude,omitempty"`
 	DNS                      cnitypes.DNS         `json:"dns"`
 	Range                    string               `json:"range"`
+	NodeSliceFastRange       string               `json:"nodeSliceFastRange"`
+	NodeSliceSize            string               `json:"nodeSliceSize"`
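+	// Illustrative NAD IPAM snippet using node slices (the "/24" slice-size
+	// format is an assumption based on how DivideRangeBySize is called):
+	//   "ipam": { "type": "whereabouts",
+	//             "nodeSliceFastRange": "10.0.0.0/8",
+	//             "nodeSliceSize": "/24" }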
 	RangeStart               net.IP               `json:"range_start,omitempty"`
 	RangeEnd                 net.IP               `json:"range_end,omitempty"`
 	GatewayStr               string               `json:"gateway"`
@@ -81,6 +83,8 @@ func (ic *IPAMConfig) UnmarshalJSON(data []byte) error {
 		Datastore                string               `json:"datastore"`
 		Addresses                []Address            `json:"addresses,omitempty"`
 		IPRanges                 []RangeConfiguration `json:"ipRanges"`
+		NodeSliceFastRange       string               `json:"nodeSliceFastRange"`
+		NodeSliceSize            string               `json:"nodeSliceSize"`
 		OmitRanges               []string             `json:"exclude,omitempty"`
 		DNS                      cnitypes.DNS         `json:"dns"`
 		Range                    string               `json:"range"`
@@ -128,6 +132,8 @@ func (ic *IPAMConfig) UnmarshalJSON(data []byte) error {
 		Range:                    ipamConfigAlias.Range,
 		RangeStart:               backwardsCompatibleIPAddress(ipamConfigAlias.RangeStart),
 		RangeEnd:                 backwardsCompatibleIPAddress(ipamConfigAlias.RangeEnd),
+		NodeSliceFastRange:       ipamConfigAlias.NodeSliceFastRange,
+		NodeSliceSize:            ipamConfigAlias.NodeSliceSize,
 		GatewayStr:               ipamConfigAlias.GatewayStr,
 		LeaderLeaseDuration:      ipamConfigAlias.LeaderLeaseDuration,
 		LeaderRenewDeadline:      ipamConfigAlias.LeaderRenewDeadline,