diff --git a/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml b/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
index 542c179f5a..03cc4f1b2e 100644
--- a/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
+++ b/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
@@ -173,6 +173,7 @@ spec:
             - "--endpoint=$(CSI_ENDPOINT)"
             - "--cloud-config=$(CLOUD_CONFIG)"
             - "--cluster=$(CLUSTER_NAME)"
+            - "--provide-node-service=false"
             {{- if .Values.csi.plugin.httpEndpoint.enabled }}
             - "--http-endpoint=:{{ .Values.csi.plugin.httpEndpoint.port }}"
             {{- end }}
diff --git a/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml b/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
index e55e8d69ea..4fef26eba9 100644
--- a/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
+++ b/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
@@ -91,7 +91,8 @@ spec:
             - /bin/cinder-csi-plugin
             - "-v={{ .Values.logVerbosityLevel }}"
             - "--endpoint=$(CSI_ENDPOINT)"
-            - "--cloud-config=$(CLOUD_CONFIG)"
+            - "--provide-controller-service=false"
+            # - "--cloud-config=$(CLOUD_CONFIG)"
             {{- if .Values.csi.plugin.extraArgs }}
             {{- with .Values.csi.plugin.extraArgs }}
             {{- tpl . $ | trim | nindent 12 }}
@@ -100,8 +101,8 @@ spec:
           env:
             - name: CSI_ENDPOINT
               value: unix://csi/csi.sock
-            - name: CLOUD_CONFIG
-              value: /etc/kubernetes/{{ .Values.secret.filename }}
+            # - name: CLOUD_CONFIG
+            #   value: /etc/kubernetes/{{ .Values.secret.filename }}
             {{- if .Values.csi.plugin.extraEnv }}
               {{- toYaml .Values.csi.plugin.extraEnv | nindent 12 }}
             {{- end }}
diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go
index 5bb50621ca..863f4a2b5f 100644
--- a/cmd/cinder-csi-plugin/main.go
+++ b/cmd/cinder-csi-plugin/main.go
@@ -31,13 +31,18 @@ import (
 )
 
 var (
-	endpoint                 string
-	nodeID                   string
-	cloudConfig              []string
-	cloudNames               []string
-	additionalTopologies     map[string]string
-	cluster                  string
-	httpEndpoint             string
+	endpoint             string
+	nodeID               string
+	cloudConfig          []string
+	cloudNames           []string
+	additionalTopologies map[string]string
+	cluster              string
+	httpEndpoint         string
+
+	searchOrder           string
+	rescanOnResize        bool
+	nodeVolumeAttachLimit int64
+
 	provideControllerService bool
 	provideNodeService       bool
 )
@@ -49,6 +54,17 @@ func main() {
 		Run: func(cmd *cobra.Command, args []string) {
 			handle()
 		},
+		PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
+			f := cmd.Flags()
+
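+			// cloud-config is required only when the controller service is
+			// enabled; the node service runs without OpenStack credentials.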
+			if provideControllerService {
+				if configs, err := f.GetStringSlice("cloud-config"); err != nil || len(configs) == 0 {
+					klog.Fatalf("Flag cloud-config is required when the controller service is enabled (err: %v)", err)
+				}
+			}
+
+			return nil
+		},
 		Version: version.Version,
 	}
 
@@ -62,10 +78,10 @@ func main() {
 		klog.Fatalf("Unable to mark flag endpoint to be required: %v", err)
 	}
 
-	cmd.PersistentFlags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
-	if err := cmd.MarkPersistentFlagRequired("cloud-config"); err != nil {
-		klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err)
-	}
+	cmd.Flags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true, the CSI driver provides the controller service")
+	cmd.Flags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true, the CSI driver provides the node service")
+
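+	// cloud-config is no longer marked required at registration time; it is
+	// enforced in PersistentPreRunE only when the controller service is enabled.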
+	cmd.Flags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
 
 	cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.")
 	cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. This option can be specified multiple times to add multiple additional topology keys.")
@@ -73,8 +89,11 @@ func main() {
 	cmd.PersistentFlags().StringVar(&cluster, "cluster", "", "The identifier of the cluster that the plugin is running in.")
 	cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for providing metrics for diagnostics, will listen (example: `:8080`). The default is empty string, which means the server is disabled.")
 
-	cmd.PersistentFlags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true then the CSI driver does provide the controller service (default: true)")
-	cmd.PersistentFlags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true then the CSI driver does provide the node service (default: true)")
+	cmd.PersistentFlags().StringVar(&searchOrder, "search-order", "configDrive,metadataService", "The comma-separated search order for the instance metadata. Supported sources: configDrive and metadataService")
+
+	// Node specific flags
+	cmd.PersistentFlags().BoolVar(&rescanOnResize, "rescan-on-resize", false, "If set to true then the CSI driver will rescan the device on volume resize")
+	cmd.PersistentFlags().Int64Var(&nodeVolumeAttachLimit, "node-volume-attach-limit", 256, "The maximum number of volumes that can be attached to a node")
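+	// For backward compatibility, these can still be overridden by the
+	// [Metadata] and [BlockStorage] sections of the cloud config (see handle()).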
 
 	openstack.AddExtraFlags(pflag.CommandLine)
 
@@ -87,17 +106,19 @@ func handle() {
 	d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
 
 	openstack.InitOpenStackProvider(cloudConfig, httpEndpoint)
-	var err error
-	clouds := make(map[string]openstack.IOpenStack)
-	for _, cloudName := range cloudNames {
-		clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName)
-		if err != nil {
-			klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err)
-			return
-		}
-	}
 
 	if provideControllerService {
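+		// OpenStack clients are only initialized for the controller service;
+		// the node service no longer talks to the OpenStack API.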
+		clouds := make(map[string]openstack.IOpenStack, len(cloudNames))
+		for _, cloudName := range cloudNames {
+			var err error
+
+			clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName)
+			if err != nil {
+				klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err)
+				return
+			}
+		}
+
 		d.SetupControllerService(clouds)
 	}
 
@@ -105,10 +126,26 @@ func handle() {
 		//Initialize mount
 		mount := mount.GetMountProvider()
 
+		// Backward compatibility: prefer [Metadata] and [BlockStorage] settings
+		// from the cloud config, when set, over the corresponding command-line
+		// flags (the attach limit is only lowered, never raised). Errors are
+		// ignored: the cloud config is optional for the node service.
+		cfg, err := openstack.GetConfigFromFiles(cloudConfig)
+		if err == nil {
+			if cfg.Metadata.SearchOrder != "" {
+				searchOrder = cfg.Metadata.SearchOrder
+			}
+
+			if cfg.BlockStorage.RescanOnResize {
+				rescanOnResize = cfg.BlockStorage.RescanOnResize
+			}
+
+			if cfg.BlockStorage.NodeVolumeAttachLimit > 0 && cfg.BlockStorage.NodeVolumeAttachLimit < nodeVolumeAttachLimit {
+				nodeVolumeAttachLimit = cfg.BlockStorage.NodeVolumeAttachLimit
+			}
+		}
+
 		//Initialize Metadata
-		metadata := metadata.GetMetadataProvider(clouds[cloudNames[0]].GetMetadataOpts().SearchOrder)
+		metadata := metadata.GetMetadataProvider(searchOrder)
 
-		d.SetupNodeService(clouds[cloudNames[0]], mount, metadata, additionalTopologies)
+		d.SetupNodeService(mount, metadata, rescanOnResize, nodeVolumeAttachLimit, additionalTopologies)
 	}
 
 	d.Run()
diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go
index a2d42eb21a..0a18c04bcb 100644
--- a/pkg/csi/cinder/controllerserver.go
+++ b/pkg/csi/cinder/controllerserver.go
@@ -1061,8 +1061,11 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi
 func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse {
 
 	var volsrc *csi.VolumeContentSource
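+	// resizeRequired is passed to the node service via the volume context:
+	// a volume restored from a snapshot, another volume, or a backup may be
+	// larger than its source and need a filesystem resize on first stage
+	// (see #1539).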
+	resizeRequired := false
 
 	if vol.SnapshotID != "" {
+		resizeRequired = true
+
 		volsrc = &csi.VolumeContentSource{
 			Type: &csi.VolumeContentSource_Snapshot{
 				Snapshot: &csi.VolumeContentSource_SnapshotSource{
@@ -1073,6 +1076,8 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
 	}
 
 	if vol.SourceVolID != "" {
+		resizeRequired = true
+
 		volsrc = &csi.VolumeContentSource{
 			Type: &csi.VolumeContentSource_Volume{
 				Volume: &csi.VolumeContentSource_VolumeSource{
@@ -1083,6 +1088,8 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
 	}
 
 	if vol.BackupID != nil && *vol.BackupID != "" {
+		resizeRequired = true
+
 		volsrc = &csi.VolumeContentSource{
 			Type: &csi.VolumeContentSource_Snapshot{
 				Snapshot: &csi.VolumeContentSource_SnapshotSource{
@@ -1107,12 +1114,18 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
 		}
 	}
 
+	volCtx := map[string]string{}
+	if resizeRequired {
+		volCtx[ResizeRequired] = "true"
+	}
+
 	resp := &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
 			VolumeId:           vol.ID,
 			CapacityBytes:      int64(vol.Size * 1024 * 1024 * 1024),
 			AccessibleTopology: accessibleTopology,
 			ContentSource:      volsrc,
+			VolumeContext:      volCtx,
 		},
 	}
 
diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go
index 773b98a8cc..c7f0d6fe74 100644
--- a/pkg/csi/cinder/driver.go
+++ b/pkg/csi/cinder/driver.go
@@ -32,6 +32,12 @@ import (
 const (
 	driverName  = "cinder.csi.openstack.org"
 	topologyKey = "topology." + driverName + "/zone"
+
+	// MaxVolumesPerNode is the maximum number of volumes that can be attached to a node
+	MaxVolumesPerNode = 256
+
+	// ResizeRequired is a volume-context key; when set to "true", the node
+	// service resizes the filesystem while staging the volume
+	ResizeRequired = "resizeRequired"
 )
 
 var (
@@ -177,9 +183,14 @@ func (d *Driver) SetupControllerService(clouds map[string]openstack.IOpenStack)
 	d.cs = NewControllerServer(d, clouds)
 }
 
-func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata, topologies map[string]string) {
+func (d *Driver) SetupNodeService(mount mount.IMount, metadata metadata.IMetadata, rescanOnResize bool, nodeVolumeAttachLimit int64, topologies map[string]string) {
 	klog.Info("Providing node service")
-	d.ns = NewNodeServer(d, mount, metadata, cloud, topologies)
+
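+	// Guard against invalid configuration: values outside [0, MaxVolumesPerNode]
+	// fall back to MaxVolumesPerNode. NodeGetInfo reports the resulting limit.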
+	if nodeVolumeAttachLimit < 0 || nodeVolumeAttachLimit > MaxVolumesPerNode {
+		nodeVolumeAttachLimit = MaxVolumesPerNode
+	}
+
+	d.ns = NewNodeServer(d, mount, metadata, nodeVolumeAttachLimit, rescanOnResize, topologies)
 }
 
 func (d *Driver) Run() {
diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go
index bfa966c741..73d6842b5c 100644
--- a/pkg/csi/cinder/nodeserver.go
+++ b/pkg/csi/cinder/nodeserver.go
@@ -24,16 +24,13 @@ import (
 	"strings"
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
-	"github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes"
 	"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"k8s.io/klog/v2"
 	utilpath "k8s.io/utils/path"
 
-	"k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack"
 	"k8s.io/cloud-provider-openstack/pkg/util/blockdevice"
-	cpoerrors "k8s.io/cloud-provider-openstack/pkg/util/errors"
 	"k8s.io/cloud-provider-openstack/pkg/util/metadata"
 	"k8s.io/cloud-provider-openstack/pkg/util/mount"
 	mountutil "k8s.io/mount-utils"
@@ -43,8 +40,10 @@ type nodeServer struct {
 	Driver     *Driver
 	Mount      mount.IMount
 	Metadata   metadata.IMetadata
-	Cloud      openstack.IOpenStack
 	Topologies map[string]string
+
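+	// Node tunables that were previously looked up through the OpenStack
+	// client at runtime; now injected via NewNodeServer.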
+	maxVolumeAttachLimit int64
+	rescanOnResize       bool
 }
 
 func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
@@ -163,73 +162,10 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, status.Error(codes.InvalidArgument, "[NodeUnpublishVolume] volumeID must be provided")
 	}
 
-	ephemeralVolume := false
-
-	vol, err := ns.Cloud.GetVolume(volumeID)
-	if err != nil {
-
-		if !cpoerrors.IsNotFound(err) {
-			return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
-		}
-
-		// if not found by id, try to search by name
-		volName := fmt.Sprintf("ephemeral-%s", volumeID)
-
-		vols, err := ns.Cloud.GetVolumesByName(volName)
-
-		//if volume not found then GetVolumesByName returns empty list
-		if err != nil {
-			return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
-		}
-		if len(vols) > 0 {
-			vol = &vols[0]
-			ephemeralVolume = true
-		} else {
-			return nil, status.Errorf(codes.NotFound, "Volume not found %s", volName)
-		}
-	}
-
-	err = ns.Mount.UnmountPath(targetPath)
-	if err != nil {
+	if err := ns.Mount.UnmountPath(targetPath); err != nil {
 		return nil, status.Errorf(codes.Internal, "Unmount of targetpath %s failed with error %v", targetPath, err)
 	}
 
-	if ephemeralVolume {
-		return nodeUnpublishEphemeral(req, ns, vol)
-	}
-
-	return &csi.NodeUnpublishVolumeResponse{}, nil
-
-}
-
-func nodeUnpublishEphemeral(req *csi.NodeUnpublishVolumeRequest, ns *nodeServer, vol *volumes.Volume) (*csi.NodeUnpublishVolumeResponse, error) {
-	volumeID := vol.ID
-	var instanceID string
-
-	if len(vol.Attachments) > 0 {
-		instanceID = vol.Attachments[0].ServerID
-	} else {
-		return nil, status.Error(codes.FailedPrecondition, "Volume attachment not found in request")
-	}
-
-	err := ns.Cloud.DetachVolume(instanceID, volumeID)
-	if err != nil {
-		klog.V(3).Infof("Failed to DetachVolume: %v", err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	err = ns.Cloud.WaitDiskDetached(instanceID, volumeID)
-	if err != nil {
-		klog.V(3).Infof("Failed to WaitDiskDetached: %v", err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	err = ns.Cloud.DeleteVolume(volumeID)
-	if err != nil {
-		klog.V(3).Infof("Failed to DeleteVolume: %v", err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
 	return &csi.NodeUnpublishVolumeResponse{}, nil
 }
 
@@ -238,6 +174,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 
 	stagingTarget := req.GetStagingTargetPath()
 	volumeCapability := req.GetVolumeCapability()
+	volumeContext := req.GetVolumeContext()
 	volumeID := req.GetVolumeId()
 
 	if len(volumeID) == 0 {
@@ -251,14 +188,6 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 		return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume Capability must be provided")
 	}
 
-	vol, err := ns.Cloud.GetVolume(volumeID)
-	if err != nil {
-		if cpoerrors.IsNotFound(err) {
-			return nil, status.Error(codes.NotFound, "Volume not found")
-		}
-		return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
-	}
-
 	m := ns.Mount
 	// Do not trust the path provided by cinder, get the real path on node
 	devicePath, err := getDevicePath(volumeID, m)
@@ -296,9 +225,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 		}
 	}
 
-	// Try expanding the volume if it's created from a snapshot or another volume (see #1539)
-	if vol.SourceVolID != "" || vol.SnapshotID != "" {
-
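+	// The controller flags restored volumes via the ResizeRequired
+	// volume-context entry, so no Cinder lookup is needed to decide
+	// whether to resize.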
+	if required, ok := volumeContext[ResizeRequired]; ok && strings.EqualFold(required, "true") {
 		r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec)
 
 		needResize, err := r.NeedResize(devicePath, stagingTarget)
@@ -376,12 +303,10 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque
 	}
 	topology := &csi.Topology{Segments: topologyMap}
 
-	maxVolume := ns.Cloud.GetMaxVolLimit()
-
 	return &csi.NodeGetInfoResponse{
 		NodeId:             nodeID,
 		AccessibleTopology: topology,
-		MaxVolumesPerNode:  maxVolume,
+		MaxVolumesPerNode:  ns.maxVolumeAttachLimit,
 	}, nil
 }
 
@@ -448,13 +373,16 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
 	if len(volumePath) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "Volume path not provided")
 	}
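+	// Block volumes have no filesystem to resize; expansion is complete once
+	// the backing device itself has grown.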
+	if volCapability := req.GetVolumeCapability(); volCapability != nil && volCapability.GetBlock() != nil {
+		return &csi.NodeExpandVolumeResponse{}, nil
+	}
 
-	_, err := ns.Cloud.GetVolume(volumeID)
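+	// Check the path locally instead of querying Cinder; a failure here
+	// means the volume path is not present on this node.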
+	_, err := blockdevice.IsBlockDevice(volumePath)
 	if err != nil {
-		if cpoerrors.IsNotFound(err) {
-			return nil, status.Errorf(codes.NotFound, "Volume with ID %s not found", volumeID)
-		}
-		return nil, status.Errorf(codes.Internal, "NodeExpandVolume failed with error %v", err)
+		return nil, status.Errorf(codes.NotFound, "Failed to find volume path %s: %v", volumePath, err)
 	}
 
 	output, err := ns.Mount.GetMountFs(volumePath)
@@ -467,13 +395,14 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
 		return nil, status.Error(codes.Internal, "Unable to find Device path for volume")
 	}
 
-	if ns.Cloud.GetBlockStorageOpts().RescanOnResize {
+	if ns.rescanOnResize {
 		// comparing current volume size with the expected one
 		newSize := req.GetCapacityRange().GetRequiredBytes()
 		if err := blockdevice.RescanBlockDeviceGeometry(devicePath, volumePath, newSize); err != nil {
 			return nil, status.Errorf(codes.Internal, "Could not verify %q volume size: %v", volumeID, err)
 		}
 	}
+
 	r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec)
 	if _, err := r.Resize(devicePath, volumePath); err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not resize volume %q: %v", volumeID, err)
@@ -499,7 +428,6 @@ func getDevicePath(volumeID string, m mount.IMount) (string, error) {
 	}
 
 	return devicePath, nil
-
 }
 
 func collectMountOptions(fsType string, mntFlags []string) []string {
diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go
index f0cbc1b904..e871b6bc45 100644
--- a/pkg/csi/cinder/nodeserver_test.go
+++ b/pkg/csi/cinder/nodeserver_test.go
@@ -54,7 +54,7 @@ func init() {
 			"": omock,
 		}
 
-		fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""], map[string]string{})
+		fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, MaxVolumesPerNode, false, map[string]string{})
 	}
 }
 
@@ -263,45 +263,6 @@ func TestNodeUnpublishVolume(t *testing.T) {
 	assert.Equal(expectedRes, actualRes)
 }
 
-func TestNodeUnpublishVolumeEphermeral(t *testing.T) {
-	mount.MInstance = mmock
-	metadata.MetadataService = metamock
-	osmock := map[string]openstack.IOpenStack{
-		"": new(openstack.OpenStackMock),
-	}
-	fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID)
-
-	mmock.On("UnmountPath", FakeTargetPath).Return(nil)
-	omock.On("GetVolumesByName", fvolName).Return(FakeVolList, nil)
-	omock.On("DetachVolume", FakeNodeID, FakeVolID).Return(nil)
-	omock.On("WaitDiskDetached", FakeNodeID, FakeVolID).Return(nil)
-	omock.On("DeleteVolume", FakeVolID).Return(nil)
-
-	d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
-	fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, osmock[""], map[string]string{})
-
-	// Init assert
-	assert := assert.New(t)
-
-	// Expected Result
-	expectedRes := &csi.NodeUnpublishVolumeResponse{}
-
-	// Fake request
-	fakeReq := &csi.NodeUnpublishVolumeRequest{
-		VolumeId:   FakeVolID,
-		TargetPath: FakeTargetPath,
-	}
-
-	// Invoke NodeUnpublishVolume
-	actualRes, err := fakeNse.NodeUnpublishVolume(FakeCtx, fakeReq)
-	if err != nil {
-		t.Errorf("failed to NodeUnpublishVolume: %v", err)
-	}
-
-	// Assert
-	assert.Equal(expectedRes, actualRes)
-}
-
 // Test NodeUnstageVolume
 func TestNodeUnstageVolume(t *testing.T) {
 
@@ -335,10 +296,19 @@ func TestNodeExpandVolume(t *testing.T) {
 	// Init assert
 	assert := assert.New(t)
 
+	// setup for test: use a dedicated temp dir so cleanup cannot touch the
+	// shared system temp dir
+	tempDir, err := os.MkdirTemp("", "cinder-csi-test")
+	if err != nil {
+		t.Fatalf("Failed to create temp dir: %v", err)
+	}
+	defer os.RemoveAll(tempDir)
+	volumePath := filepath.Join(tempDir, FakeTargetPath)
+	if err := os.MkdirAll(volumePath, 0750); err != nil {
+		t.Fatalf("Failed to set up volumepath: %v", err)
+	}
+
 	// Fake request
 	fakeReq := &csi.NodeExpandVolumeRequest{
 		VolumeId:   FakeVolName,
-		VolumePath: FakeDevicePath,
+		VolumePath: volumePath,
 	}
 
 	// Expected Result
diff --git a/pkg/csi/cinder/utils.go b/pkg/csi/cinder/utils.go
index 13c1ee993a..f54c630af6 100644
--- a/pkg/csi/cinder/utils.go
+++ b/pkg/csi/cinder/utils.go
@@ -57,13 +57,14 @@ func NewIdentityServer(d *Driver) *identityServer {
 	}
 }
 
-func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, cloud openstack.IOpenStack, topologies map[string]string) *nodeServer {
+func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, maxVolumes int64, rescanOnResize bool, topologies map[string]string) *nodeServer {
 	return &nodeServer{
-		Driver:     d,
-		Mount:      mount,
-		Metadata:   metadata,
-		Cloud:      cloud,
-		Topologies: topologies,
+		Driver:               d,
+		Mount:                mount,
+		Metadata:             metadata,
+		Topologies:           topologies,
+		maxVolumeAttachLimit: maxVolumes,
+		rescanOnResize:       rescanOnResize,
 	}
 }
 
diff --git a/pkg/util/blockdevice/blockdevice_unsupported.go b/pkg/util/blockdevice/blockdevice_unsupported.go
index a75c69be7b..b9adad44d3 100644
--- a/pkg/util/blockdevice/blockdevice_unsupported.go
+++ b/pkg/util/blockdevice/blockdevice_unsupported.go
@@ -34,3 +34,7 @@ func GetBlockDeviceSize(path string) (int64, error) {
 func RescanBlockDeviceGeometry(devicePath string, deviceMountPath string, newSize int64) error {
 	return errors.New("RescanBlockDeviceGeometry is not implemented for this OS")
 }
+
+func RescanDevice(devicePath string) error {
+	return errors.New("RescanDevice is not implemented for this OS")
+}
diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go
index 3eb723ddfa..199a9e9fe3 100644
--- a/tests/sanity/cinder/sanity_test.go
+++ b/tests/sanity/cinder/sanity_test.go
@@ -30,7 +30,7 @@ func TestDriver(t *testing.T) {
 	fakemet := &fakemetadata{}
 
 	d.SetupControllerService(openstack.OsInstances)
-	d.SetupNodeService(fakecloudprovider, fakemnt, fakemet, map[string]string{})
+	d.SetupNodeService(fakemnt, fakemet, false, 200, map[string]string{})
 
 	// TODO: Stop call