From 857d23863b68e0f2589c4b05affe2619edd4f905 Mon Sep 17 00:00:00 2001
From: Dariusz Porowski <3431813+DariuszPorowski@users.noreply.github.com>
Date: Thu, 23 Jan 2025 11:07:12 -0800
Subject: [PATCH] feat(fabric_spark_workspace_settings): add more properties to
rs/ds (#201)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# 📥 Pull Request
## ❓ What are you trying to address
This pull request introduces new properties to the
`fabric_spark_workspace_settings` data source and resource. The changes
add attributes for managing job settings and notebook pipeline runs,
along with matching updates to the documentation and schema
definitions.
## ✨ Description of new changes
Enhancements to `fabric_spark_workspace_settings`:
* Added new properties to the `fabric_spark_workspace_settings` data
source and resource (a brief usage sketch follows this list):
  - `high_concurrency.notebook_pipeline_run_enabled` (Boolean)
  - `job.conservative_job_admission_enabled` (Boolean)
  - `job.session_timeout_in_minutes` (Number)
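
A minimal, hypothetical sketch of the new attributes (the
`workspace_id` is a placeholder; only the new settings are shown, and
attribute names match the schema added in this PR):

```hcl
resource "fabric_spark_workspace_settings" "example" {
  workspace_id = "00000000-0000-0000-0000-000000000000" # placeholder GUID

  high_concurrency = {
    # Allow pipeline notebook runs to share high-concurrency sessions.
    notebook_pipeline_run_enabled = true
  }

  job = {
    # Reserve the maximum cores needed for active Spark jobs.
    conservative_job_admission_enabled = true
    # Terminate inactive sessions after 60 minutes (at most 20160, i.e. 14 days).
    session_timeout_in_minutes = 60
  }
}
```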
---
.../unreleased/added-20250116-152025.yaml | 9 +++
docs/data-sources/spark_workspace_settings.md | 11 ++++
docs/resources/spark_workspace_settings.md | 17 ++++++
.../resource.tf | 6 ++
.../spark/data_spark_workspace_settings.go | 18 ++++++
.../data_spark_workspace_settings_test.go | 30 +++++----
.../spark/models_spark_workspace_settings.go | 61 ++++++++++++++++++-
.../resource_spark_workspace_settings.go | 40 ++++++++++++
.../resource_spark_workspace_settings_test.go | 19 +++++-
9 files changed, 194 insertions(+), 17 deletions(-)
create mode 100644 .changes/unreleased/added-20250116-152025.yaml
diff --git a/.changes/unreleased/added-20250116-152025.yaml b/.changes/unreleased/added-20250116-152025.yaml
new file mode 100644
index 00000000..e8809b07
--- /dev/null
+++ b/.changes/unreleased/added-20250116-152025.yaml
@@ -0,0 +1,9 @@
+kind: added
+body: |
+ Added additional properties for `fabric_spark_workspace_settings` Data-Source and Resource:
+ - `high_concurrency.notebook_pipeline_run_enabled` (Boolean)
+ - `job.conservative_job_admission_enabled` (Boolean)
+ - `job.session_timeout_in_minutes` (Number)
+time: 2025-01-16T15:20:25.9324812-08:00
+custom:
+ Issue: "201"
diff --git a/docs/data-sources/spark_workspace_settings.md b/docs/data-sources/spark_workspace_settings.md
index a3987804..844f20db 100644
--- a/docs/data-sources/spark_workspace_settings.md
+++ b/docs/data-sources/spark_workspace_settings.md
@@ -41,6 +41,7 @@ data "fabric_spark_workspace_settings" "example" {
- `environment` (Attributes) Environment properties. (see [below for nested schema](#nestedatt--environment))
- `high_concurrency` (Attributes) High Concurrency properties. (see [below for nested schema](#nestedatt--high_concurrency))
- `id` (String) The ID of this resource.
+- `job` (Attributes) (see [below for nested schema](#nestedatt--job))
- `pool` (Attributes) Pool properties. (see [below for nested schema](#nestedatt--pool))
@@ -75,6 +76,16 @@ Read-Only:
Read-Only:
- `notebook_interactive_run_enabled` (Boolean) The status of the high concurrency for notebook interactive run. `false` - Disabled, `true` - Enabled.
+- `notebook_pipeline_run_enabled` (Boolean) The status of the high concurrency for notebook pipeline run. `false` - Disabled, `true` - Enabled.
+
+
+
+### Nested Schema for `job`
+
+Read-Only:
+
+- `conservative_job_admission_enabled` (Boolean) Reserve maximum cores for active Spark jobs. When this setting is enabled, your Fabric capacity reserves the maximum number of cores needed for active Spark jobs, ensuring job reliability by making sure that cores are available if a job scales up. When this setting is disabled, jobs are started based on the minimum number of cores needed, letting more jobs run at the same time. `false` - Disabled, `true` - Enabled.
+- `session_timeout_in_minutes` (Number) Time to terminate inactive Spark sessions. The maximum is 14 days (20160 minutes).
diff --git a/docs/resources/spark_workspace_settings.md b/docs/resources/spark_workspace_settings.md
index 24e92856..3ab66d34 100644
--- a/docs/resources/spark_workspace_settings.md
+++ b/docs/resources/spark_workspace_settings.md
@@ -41,6 +41,12 @@ resource "fabric_spark_workspace_settings" "example" {
*/
}
+ job = {
+ /*
+ your settings here
+ */
+ }
+
pool = {
/*
your settings here
@@ -91,6 +97,7 @@ resource "fabric_spark_workspace_settings" "example2" {
- `automatic_log` (Attributes) Automatic Log properties. (see [below for nested schema](#nestedatt--automatic_log))
- `environment` (Attributes) Environment properties. (see [below for nested schema](#nestedatt--environment))
- `high_concurrency` (Attributes) High Concurrency properties. (see [below for nested schema](#nestedatt--high_concurrency))
+- `job` (Attributes) Jobs properties. (see [below for nested schema](#nestedatt--job))
- `pool` (Attributes) Pool properties. (see [below for nested schema](#nestedatt--pool))
- `timeouts` (Attributes) (see [below for nested schema](#nestedatt--timeouts))
@@ -122,6 +129,16 @@ Optional:
Optional:
- `notebook_interactive_run_enabled` (Boolean) The status of the high concurrency for notebook interactive run. `false` - Disabled, `true` - Enabled.
+- `notebook_pipeline_run_enabled` (Boolean) The status of the high concurrency for notebook pipeline run. `false` - Disabled, `true` - Enabled.
+
+
+
+### Nested Schema for `job`
+
+Optional:
+
+- `conservative_job_admission_enabled` (Boolean) Reserve maximum cores for active Spark jobs. When this setting is enabled, your Fabric capacity reserves the maximum number of cores needed for active Spark jobs, ensuring job reliability by making sure that cores are available if a job scales up. When this setting is disabled, jobs are started based on the minimum number of cores needed, letting more jobs run at the same time. `false` - Disabled, `true` - Enabled.
+- `session_timeout_in_minutes` (Number) Time to terminate inactive Spark sessions. The maximum is 14 days (20160 minutes).
diff --git a/examples/resources/fabric_spark_workspace_settings/resource.tf b/examples/resources/fabric_spark_workspace_settings/resource.tf
index ac3dfaba..b5f6db6a 100644
--- a/examples/resources/fabric_spark_workspace_settings/resource.tf
+++ b/examples/resources/fabric_spark_workspace_settings/resource.tf
@@ -20,6 +20,12 @@ resource "fabric_spark_workspace_settings" "example" {
*/
}
+ job = {
+ /*
+ your settings here
+ */
+ }
+
pool = {
/*
your settings here
diff --git a/internal/services/spark/data_spark_workspace_settings.go b/internal/services/spark/data_spark_workspace_settings.go
index adaf420c..edf15397 100644
--- a/internal/services/spark/data_spark_workspace_settings.go
+++ b/internal/services/spark/data_spark_workspace_settings.go
@@ -87,6 +87,24 @@ func (d *dataSourceSparkWorkspaceSettings) Schema(ctx context.Context, _ datasou
MarkdownDescription: "The status of the high concurrency for notebook interactive run. `false` - Disabled, `true` - Enabled.",
Computed: true,
},
+ "notebook_pipeline_run_enabled": schema.BoolAttribute{
+ MarkdownDescription: "The status of the high concurrency for notebook pipeline run. `false` - Disabled, `true` - Enabled.",
+ Computed: true,
+ },
+ },
+ },
+ "job": schema.SingleNestedAttribute{
+ Computed: true,
+ CustomType: supertypes.NewSingleNestedObjectTypeOf[jobPropertiesModel](ctx),
+ Attributes: map[string]schema.Attribute{
+ "conservative_job_admission_enabled": schema.BoolAttribute{
+ MarkdownDescription: "Reserve maximum cores for active Spark jobs. When this setting is enabled, your Fabric capacity reserves the maximum number of cores needed for active Spark jobs, ensuring job reliability by making sure that cores are available if a job scales up. When this setting is disabled, jobs are started based on the minimum number of cores needed, letting more jobs run at the same time. `false` - Disabled, `true` - Enabled.",
+ Computed: true,
+ },
+ "session_timeout_in_minutes": schema.Int32Attribute{
+ MarkdownDescription: "Time to terminate inactive Spark sessions. The maximum is 14 days (20160 minutes).",
+ Computed: true,
+ },
},
},
"pool": schema.SingleNestedAttribute{
diff --git a/internal/services/spark/data_spark_workspace_settings_test.go b/internal/services/spark/data_spark_workspace_settings_test.go
index 27b36feb..ddc3af9f 100644
--- a/internal/services/spark/data_spark_workspace_settings_test.go
+++ b/internal/services/spark/data_spark_workspace_settings_test.go
@@ -18,27 +18,31 @@ var (
)
func TestAcc_SparkWorkspaceSettingsDataSource(t *testing.T) {
- capacity := testhelp.WellKnown()["Capacity"].(map[string]any)
- capacityID := capacity["id"].(string)
-
- workspaceResourceHCL, workspaceResourceFQN := testhelp.TestAccWorkspaceResource(t, capacityID)
+ workspace := testhelp.WellKnown()["WorkspaceDS"].(map[string]any)
+ workspaceID := workspace["id"].(string)
resource.ParallelTest(t, testhelp.NewTestAccCase(t, &testDataSourceSparkWorkspaceSettingsFQN, nil, []resource.TestStep{
// read
{
ResourceName: testDataSourceSparkWorkspaceSettingsFQN,
- Config: at.JoinConfigs(
- workspaceResourceHCL,
- at.CompileConfig(
- testDataSourceSparkWorkspaceSettingsHeader,
- map[string]any{
- "workspace_id": testhelp.RefByFQN(workspaceResourceFQN, "id"),
- },
- )),
+ Config: at.CompileConfig(
+ testDataSourceSparkWorkspaceSettingsHeader,
+ map[string]any{
+ "workspace_id": workspaceID,
+ },
+ ),
Check: resource.ComposeAggregateTestCheckFunc(
- resource.TestCheckResourceAttrSet(testDataSourceSparkWorkspaceSettingsFQN, "workspace_id"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "workspace_id", workspaceID),
resource.TestCheckResourceAttrSet(testDataSourceSparkWorkspaceSettingsFQN, "id"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "automatic_log.enabled", "true"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "high_concurrency.notebook_interactive_run_enabled", "true"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "high_concurrency.notebook_pipeline_run_enabled", "false"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "pool.customize_compute_enabled", "true"),
resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "pool.default_pool.name", "Starter Pool"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "pool.default_pool.type", "Workspace"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "environment.runtime_version", "1.3"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "job.conservative_job_admission_enabled", "false"),
+ resource.TestCheckResourceAttr(testDataSourceSparkWorkspaceSettingsFQN, "job.session_timeout_in_minutes", "20"),
),
},
},
diff --git a/internal/services/spark/models_spark_workspace_settings.go b/internal/services/spark/models_spark_workspace_settings.go
index 4044a86a..e6b2076d 100644
--- a/internal/services/spark/models_spark_workspace_settings.go
+++ b/internal/services/spark/models_spark_workspace_settings.go
@@ -32,6 +32,7 @@ type baseSparkWorkspaceSettingsModel struct {
AutomaticLog supertypes.SingleNestedObjectValueOf[automaticLogPropertiesModel] `tfsdk:"automatic_log"`
Environment supertypes.SingleNestedObjectValueOf[environmentPropertiesModel] `tfsdk:"environment"`
HighConcurrency supertypes.SingleNestedObjectValueOf[highConcurrencyPropertiesModel] `tfsdk:"high_concurrency"`
+ Job supertypes.SingleNestedObjectValueOf[jobPropertiesModel] `tfsdk:"job"`
Pool supertypes.SingleNestedObjectValueOf[poolPropertiesModel] `tfsdk:"pool"`
}
@@ -76,6 +77,19 @@ func (to *baseSparkWorkspaceSettingsModel) set(ctx context.Context, from fabspar
to.HighConcurrency = highConcurrency
+ job := supertypes.NewSingleNestedObjectValueOfNull[jobPropertiesModel](ctx)
+
+ if from.Job != nil {
+ jobModel := &jobPropertiesModel{}
+ jobModel.set(from.Job)
+
+ if diags := job.Set(ctx, jobModel); diags.HasError() {
+ return diags
+ }
+ }
+
+ to.Job = job
+
pool := supertypes.NewSingleNestedObjectValueOfNull[poolPropertiesModel](ctx)
if from.Pool != nil {
@@ -115,10 +129,22 @@ func (to *environmentPropertiesModel) set(from *fabspark.EnvironmentProperties)
type highConcurrencyPropertiesModel struct {
NotebookInteractiveRunEnabled types.Bool `tfsdk:"notebook_interactive_run_enabled"`
+ NotebookPipelineRunEnabled types.Bool `tfsdk:"notebook_pipeline_run_enabled"`
}
func (to *highConcurrencyPropertiesModel) set(from *fabspark.HighConcurrencyProperties) {
to.NotebookInteractiveRunEnabled = types.BoolPointerValue(from.NotebookInteractiveRunEnabled)
+ to.NotebookPipelineRunEnabled = types.BoolPointerValue(from.NotebookPipelineRunEnabled)
+}
+
+type jobPropertiesModel struct {
+ ConservativeJobAdmissionEnabled types.Bool `tfsdk:"conservative_job_admission_enabled"`
+ SessionTimeoutInMinutes types.Int32 `tfsdk:"session_timeout_in_minutes"`
+}
+
+func (to *jobPropertiesModel) set(from *fabspark.JobsProperties) {
+ to.ConservativeJobAdmissionEnabled = types.BoolPointerValue(from.ConservativeJobAdmissionEnabled)
+ to.SessionTimeoutInMinutes = types.Int32PointerValue(from.SessionTimeoutInMinutes)
}
type poolPropertiesModel struct {
@@ -226,10 +252,39 @@ func (to *requestUpdateSparkWorkspaceSettings) set(ctx context.Context, from res
return diags
}
+ var reqHighConcurrency fabspark.HighConcurrencyProperties
+
if !highConcurrency.NotebookInteractiveRunEnabled.IsNull() && !highConcurrency.NotebookInteractiveRunEnabled.IsUnknown() {
- to.HighConcurrency = &fabspark.HighConcurrencyProperties{
- NotebookInteractiveRunEnabled: highConcurrency.NotebookInteractiveRunEnabled.ValueBoolPointer(),
- }
+ reqHighConcurrency.NotebookInteractiveRunEnabled = highConcurrency.NotebookInteractiveRunEnabled.ValueBoolPointer()
+ }
+
+ if !highConcurrency.NotebookPipelineRunEnabled.IsNull() && !highConcurrency.NotebookPipelineRunEnabled.IsUnknown() {
+ reqHighConcurrency.NotebookPipelineRunEnabled = highConcurrency.NotebookPipelineRunEnabled.ValueBoolPointer()
+ }
+
+ if reqHighConcurrency != (fabspark.HighConcurrencyProperties{}) {
+ to.HighConcurrency = &reqHighConcurrency
+ }
+ }
+
+ if !from.Job.IsNull() && !from.Job.IsUnknown() {
+ job, diags := from.Job.Get(ctx)
+ if diags.HasError() {
+ return diags
+ }
+
+ var reqJob fabspark.JobsProperties
+
+ if !job.ConservativeJobAdmissionEnabled.IsNull() && !job.ConservativeJobAdmissionEnabled.IsUnknown() {
+ reqJob.ConservativeJobAdmissionEnabled = job.ConservativeJobAdmissionEnabled.ValueBoolPointer()
+ }
+
+ if !job.SessionTimeoutInMinutes.IsNull() && !job.SessionTimeoutInMinutes.IsUnknown() {
+ reqJob.SessionTimeoutInMinutes = job.SessionTimeoutInMinutes.ValueInt32Pointer()
+ }
+
+ if reqJob != (fabspark.JobsProperties{}) {
+ to.Job = &reqJob
}
}
diff --git a/internal/services/spark/resource_spark_workspace_settings.go b/internal/services/spark/resource_spark_workspace_settings.go
index d4a97efa..883914d1 100644
--- a/internal/services/spark/resource_spark_workspace_settings.go
+++ b/internal/services/spark/resource_spark_workspace_settings.go
@@ -8,6 +8,7 @@ import (
"fmt"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
+ "github.com/hashicorp/terraform-plugin-framework-validators/int32validator"
"github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/diag"
@@ -138,6 +139,44 @@ func (r *resourceSparkWorkspaceSettings) Schema(ctx context.Context, _ resource.
boolplanmodifier.UseStateForUnknown(),
},
},
+ "notebook_pipeline_run_enabled": schema.BoolAttribute{
+ MarkdownDescription: "The status of the high concurrency for notebook pipeline run. `false` - Disabled, `true` - Enabled.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ },
+ },
+ "job": schema.SingleNestedAttribute{
+ MarkdownDescription: "Jobs properties.",
+ Optional: true,
+ Computed: true,
+ CustomType: supertypes.NewSingleNestedObjectTypeOf[jobPropertiesModel](ctx),
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.UseStateForUnknown(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "conservative_job_admission_enabled": schema.BoolAttribute{
+ MarkdownDescription: "Reserve maximum cores for active Spark jobs. When this setting is enabled, your Fabric capacity reserves the maximum number of cores needed for active Spark jobs, ensuring job reliability by making sure that cores are available if a job scales up. When this setting is disabled, jobs are started based on the minimum number of cores needed, letting more jobs run at the same time. `false` - Disabled, `true` - Enabled.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "session_timeout_in_minutes": schema.Int32Attribute{
+ MarkdownDescription: "Time to terminate inactive Spark sessions. The maximum is 14 days (20160 minutes).",
+ Optional: true,
+ Computed: true,
+ Validators: []validator.Int32{
+ int32validator.AtMost(20160),
+ },
+ PlanModifiers: []planmodifier.Int32{
+ int32planmodifier.UseStateForUnknown(),
+ },
+ },
},
},
"pool": schema.SingleNestedAttribute{
@@ -259,6 +298,7 @@ func (r *resourceSparkWorkspaceSettings) ConfigValidators(_ context.Context) []r
path.MatchRoot("automatic_log"),
path.MatchRoot("environment"),
path.MatchRoot("high_concurrency"),
+ path.MatchRoot("job"),
path.MatchRoot("pool"),
),
}
diff --git a/internal/services/spark/resource_spark_workspace_settings_test.go b/internal/services/spark/resource_spark_workspace_settings_test.go
index 38146d52..dbd1bc54 100644
--- a/internal/services/spark/resource_spark_workspace_settings_test.go
+++ b/internal/services/spark/resource_spark_workspace_settings_test.go
@@ -36,11 +36,28 @@ func TestAcc_SparkWorkspaceSettingsResource_CRUD(t *testing.T) {
"automatic_log": map[string]any{
"enabled": false,
},
+ "high_concurrency": map[string]any{
+ "notebook_interactive_run_enabled": false,
+ "notebook_pipeline_run_enabled": true,
+ },
+ "job": map[string]any{
+ "conservative_job_admission_enabled": true,
+ "session_timeout_in_minutes": 60,
+ },
},
)),
Check: resource.ComposeAggregateTestCheckFunc(
- resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "pool.default_pool.name", "Starter Pool"),
+ resource.TestCheckResourceAttrSet(testResourceSparkWorkspaceSettingsFQN, "workspace_id"),
+ resource.TestCheckResourceAttrSet(testResourceSparkWorkspaceSettingsFQN, "id"),
resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "automatic_log.enabled", "false"),
+ resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "high_concurrency.notebook_interactive_run_enabled", "false"),
+ resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "high_concurrency.notebook_pipeline_run_enabled", "true"),
+ resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "pool.customize_compute_enabled", "true"),
+ resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "pool.default_pool.name", "Starter Pool"),
+ resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "pool.default_pool.type", "Workspace"),
+ resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "environment.runtime_version", "1.3"),
+ resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "job.conservative_job_admission_enabled", "true"),
+ resource.TestCheckResourceAttr(testResourceSparkWorkspaceSettingsFQN, "job.session_timeout_in_minutes", "60"),
),
},
// Update and Read