diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go
index e177b46413..97f6a674c5 100644
--- a/cmd/cinder-csi-plugin/main.go
+++ b/cmd/cinder-csi-plugin/main.go
@@ -41,6 +41,7 @@ var (
provideControllerService bool
provideNodeService bool
noClient bool
+ withTopology bool
)
func main() {
@@ -68,6 +69,8 @@ func main() {
klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err)
}
+ cmd.PersistentFlags().BoolVar(&withTopology, "with-topology", true, "cluster is topology-aware")
+
cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.")
cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. This option can be specified multiple times to add multiple additional topology keys.")
@@ -86,7 +89,11 @@ func main() {
func handle() {
// Initialize cloud
- d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
+ d := cinder.NewDriver(&cinder.DriverOpts{
+ Endpoint: endpoint,
+ ClusterID: cluster,
+ WithTopology: withTopology,
+ })
openstack.InitOpenStackProvider(cloudConfig, httpEndpoint)
diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
index 3fc6867e3b..532c2f5796 100644
--- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
+++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
@@ -69,6 +69,13 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f
The manifests default this to `unix://csi/csi.sock`, which is supplied via the `CSI_ENDPOINT` environment variable.
+
+--with-topology <enabled>
+
+ If set to true then the CSI driver reports node topology information and the controller service uses it when selecting the volume availability zone.
+
+ Defaults to `true` (enabled).
+
+
--cloud-config <config file> [--cloud-config <config file> ...]
This argument must be given at least once.
@@ -100,23 +107,23 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f
--provide-controller-service <enabled>
- If set to true then the CSI driver does provide the controller service.
+ If set to true then the CSI driver provides the controller service.
- The default is to provide the controller service.
+ Defaults to `true` (enabled).
--provide-node-service <enabled>
- If set to true then the CSI driver does provide the node service.
+ If set to true then the CSI driver provides the node service.
- The default is to provide the node service.
+ Defaults to `true` (enabled).
--node-service-no-os-client <disabled>
If set to true then the CSI driver does not provide the OpenStack client in the node service.
- The default is to provide the OpenStack client in the node service.
+ Defaults to `false` (disabled).
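
For reference, a minimal sketch of how the new flag plumbs through to the driver (the explicit `ParseFlags` call and the endpoint/cluster values are illustrative only, not part of this change):

```go
package example

import (
	"github.com/spf13/cobra"

	"k8s.io/cloud-provider-openstack/pkg/csi/cinder"
)

// exampleDisableTopology sketches how --with-topology=false reaches DriverOpts,
// mirroring the wiring in cmd/cinder-csi-plugin/main.go.
func exampleDisableTopology() *cinder.Driver {
	var withTopology bool
	cmd := &cobra.Command{Use: "cinder-csi-plugin"}
	cmd.PersistentFlags().BoolVar(&withTopology, "with-topology", true, "cluster is topology-aware")
	_ = cmd.ParseFlags([]string{"--with-topology=false"})

	return cinder.NewDriver(&cinder.DriverOpts{
		Endpoint:     "unix://csi/csi.sock",
		ClusterID:    "kubernetes",
		WithTopology: withTopology, // false: the node service will not report AccessibleTopology
	})
}
```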
diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go
index a2d42eb21a..f00587055a 100644
--- a/pkg/csi/cinder/controllerserver.go
+++ b/pkg/csi/cinder/controllerserver.go
@@ -61,6 +61,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
// Volume Name
volName := req.GetName()
volCapabilities := req.GetVolumeCapabilities()
+ volParams := req.GetParameters()
if len(volName) == 0 {
return nil, status.Error(codes.InvalidArgument, "[CreateVolume] missing Volume Name")
@@ -80,13 +81,17 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
// Volume Type
volType := req.GetParameters()["type"]
- // First check if volAvailability is already specified, if not get preferred from Topology
- // Required, incase vol AZ is different from node AZ
- volAvailability := req.GetParameters()["availability"]
- if volAvailability == "" {
- // Check from Topology
- if req.GetAccessibilityRequirements() != nil {
- volAvailability = util.GetAZFromTopology(topologyKey, req.GetAccessibilityRequirements())
+ var volAvailability string
+ if cs.Driver.withTopology {
+ // First check whether volAvailability is already specified; if not, get the preferred zone from Topology.
+ // Required in case the volume AZ is different from the node AZ.
+ volAvailability = volParams["availability"]
+ if volAvailability == "" {
+ accessibleTopologyReq := req.GetAccessibilityRequirements()
+ // Check from Topology
+ if accessibleTopologyReq != nil {
+ volAvailability = util.GetAZFromTopology(topologyKey, accessibleTopologyReq)
+ }
}
}
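
For illustration, a sketch of the zone lookup performed above when no `availability` parameter is set; the request literal and zone value are made up, and the literal key mirrors the driver's `topologyKey` constant:

```go
package example

import (
	"github.com/container-storage-interface/spec/lib/go/csi"

	"k8s.io/cloud-provider-openstack/pkg/util"
)

// exampleVolumeAZ sketches the AZ lookup performed when withTopology is enabled
// and the storage class sets no "availability" parameter.
func exampleVolumeAZ() string {
	req := &csi.CreateVolumeRequest{
		Name: "pvc-example",
		AccessibilityRequirements: &csi.TopologyRequirement{
			Preferred: []*csi.Topology{
				{Segments: map[string]string{"topology.cinder.csi.openstack.org/zone": "nova-az1"}},
			},
		},
	}
	// Returns "nova-az1": the preferred segment for the topology key wins.
	return util.GetAZFromTopology("topology.cinder.csi.openstack.org/zone", req.GetAccessibilityRequirements())
}
```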
diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go
index e65d8c661d..1094cc801d 100644
--- a/pkg/csi/cinder/controllerserver_test.go
+++ b/pkg/csi/cinder/controllerserver_test.go
@@ -38,7 +38,7 @@ func init() {
osmock = new(openstack.OpenStackMock)
osmockRegionX = new(openstack.OpenStackMock)
- d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
+ d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true})
fakeCs = NewControllerServer(d, map[string]openstack.IOpenStack{
"": osmock,
diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go
index 773b98a8cc..08da8d5080 100644
--- a/pkg/csi/cinder/driver.go
+++ b/pkg/csi/cinder/driver.go
@@ -56,10 +56,11 @@ type CinderDriver = Driver
//revive:enable:exported
type Driver struct {
- name string
- fqVersion string //Fully qualified version in format {Version}@{CPO version}
- endpoint string
- cluster string
+ name string
+ fqVersion string //Fully qualified version in format {Version}@{CPO version}
+ endpoint string
+ cluster string
+ withTopology bool
ids *identityServer
cs *controllerServer
@@ -71,8 +72,9 @@ type Driver struct {
}
type DriverOpts struct {
- ClusterID string
- Endpoint string
+ ClusterID string
+ Endpoint string
+ WithTopology bool
}
func NewDriver(o *DriverOpts) *Driver {
@@ -81,10 +83,12 @@ func NewDriver(o *DriverOpts) *Driver {
d.fqVersion = fmt.Sprintf("%s@%s", Version, version.Version)
d.endpoint = o.Endpoint
d.cluster = o.ClusterID
+ d.withTopology = o.WithTopology
klog.Info("Driver: ", d.name)
klog.Info("Driver version: ", d.fqVersion)
klog.Info("CSI Spec version: ", specVersion)
+ klog.Infof("Topology awareness: %t", d.withTopology)
d.AddControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{
diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go
index bfa966c741..8bc7cb74e8 100644
--- a/pkg/csi/cinder/nodeserver.go
+++ b/pkg/csi/cinder/nodeserver.go
@@ -359,12 +359,21 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
}
func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
-
nodeID, err := ns.Metadata.GetInstanceID()
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodeGetInfo] unable to retrieve instance id of node %v", err)
}
+ maxVolume := ns.Cloud.GetMaxVolLimit()
+ nodeInfo := &csi.NodeGetInfoResponse{
+ NodeId: nodeID,
+ MaxVolumesPerNode: maxVolume,
+ }
+
+ if !ns.Driver.withTopology {
+ return nodeInfo, nil
+ }
+
zone, err := ns.Metadata.GetAvailabilityZone()
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodeGetInfo] Unable to retrieve availability zone of node %v", err)
@@ -374,15 +383,9 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque
for k, v := range ns.Topologies {
topologyMap[k] = v
}
- topology := &csi.Topology{Segments: topologyMap}
-
- maxVolume := ns.Cloud.GetMaxVolLimit()
+ nodeInfo.AccessibleTopology = &csi.Topology{Segments: topologyMap}
- return &csi.NodeGetInfoResponse{
- NodeId: nodeID,
- AccessibleTopology: topology,
- MaxVolumesPerNode: maxVolume,
- }, nil
+ return nodeInfo, nil
}
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
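
To make the `NodeGetInfo` change concrete, a sketch of the two response shapes (node ID, volume limit and zone are illustrative values):

```go
package example

import "github.com/container-storage-interface/spec/lib/go/csi"

// exampleNodeInfo mirrors the new control flow: the zone lookup and the
// AccessibleTopology field are only populated when topology awareness is on.
func exampleNodeInfo(withTopology bool) *csi.NodeGetInfoResponse {
	resp := &csi.NodeGetInfoResponse{
		NodeId:            "instance-id",
		MaxVolumesPerNode: 256,
	}
	if withTopology {
		resp.AccessibleTopology = &csi.Topology{
			Segments: map[string]string{"topology.cinder.csi.openstack.org/zone": "nova-az1"},
		}
	}
	return resp
}
```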
diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go
index f0cbc1b904..59a4946863 100644
--- a/pkg/csi/cinder/nodeserver_test.go
+++ b/pkg/csi/cinder/nodeserver_test.go
@@ -40,7 +40,7 @@ var omock *openstack.OpenStackMock
func init() {
if fakeNs == nil {
- d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
+ d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true})
// mock MountMock
mmock = new(mount.MountMock)
diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go
index 3eb723ddfa..eb388ecd00 100644
--- a/tests/sanity/cinder/sanity_test.go
+++ b/tests/sanity/cinder/sanity_test.go
@@ -19,7 +19,7 @@ func TestDriver(t *testing.T) {
endpoint := "unix://" + socket
cluster := "kubernetes"
- d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
+ d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster, WithTopology: true})
fakecloudprovider := getfakecloud()
openstack.OsInstances = map[string]openstack.IOpenStack{