Skip to content

Commit

Permalink
upsert tests: Fix load-test workflow
Browse files Browse the repository at this point in the history
This workflow is only run manually, and it never seems to have worked.

Context: https://materializeinc.slack.com/archives/C01LKF361MZ/p1736501436636609
  • Loading branch information
def- committed Jan 10, 2025
1 parent 769f822 commit 2e34c9d
Showing 1 changed file with 5 additions and 18 deletions.
23 changes: 5 additions & 18 deletions test/upsert/mzcompose.py
Original file line number Diff line number Diff line change
Expand Up @@ -547,7 +547,6 @@ def workflow_load_test(c: Composition, parser: WorkflowArgumentParser) -> None:
backpressure_bytes = 50 * 1024 * 1024 # 50MB

c.down(destroy_volumes=True)
c.up("redpanda", "materialized", "clusterd1")
# initial hydration
with c.override(
Testdrive(no_reset=True, consistent_seed=True, default_timeout=f"{5 * 60}s"),
Expand All @@ -558,6 +557,7 @@ def workflow_load_test(c: Composition, parser: WorkflowArgumentParser) -> None:
additional_system_parameter_defaults={
"disk_cluster_replicas_default": "true",
"enable_disk_cluster_replicas": "true",
"unsafe_enable_unorchestrated_cluster_replicas": "true",
"upsert_rocksdb_auto_spill_threshold_bytes": "250",
# Force backpressure to be enabled.
"storage_dataflow_max_inflight_bytes": f"{backpressure_bytes}",
Expand All @@ -569,6 +569,7 @@ def workflow_load_test(c: Composition, parser: WorkflowArgumentParser) -> None:
name="clusterd1",
),
):
c.up("redpanda", "materialized", "clusterd1")
c.up("testdrive", persistent=True)
c.exec("testdrive", "load-test/setup.td")
c.testdrive(
Expand Down Expand Up @@ -611,6 +612,7 @@ def workflow_load_test(c: Composition, parser: WorkflowArgumentParser) -> None:
{
"disk_cluster_replicas_default": "true",
"enable_disk_cluster_replicas": "true",
"unsafe_enable_unorchestrated_cluster_replicas": "true",
"upsert_rocksdb_auto_spill_threshold_bytes": "250",
# Force backpressure to be enabled.
"storage_dataflow_max_inflight_bytes": f"{backpressure_bytes}",
Expand All @@ -623,6 +625,7 @@ def workflow_load_test(c: Composition, parser: WorkflowArgumentParser) -> None:
"storage_rocksdb_use_merge_operator": "true",
"disk_cluster_replicas_default": "true",
"enable_disk_cluster_replicas": "true",
"unsafe_enable_unorchestrated_cluster_replicas": "true",
"upsert_rocksdb_auto_spill_threshold_bytes": "250",
# Force backpressure to be enabled.
"storage_dataflow_max_inflight_bytes": f"{backpressure_bytes}",
Expand All @@ -634,6 +637,7 @@ def workflow_load_test(c: Composition, parser: WorkflowArgumentParser) -> None:
{
"disk_cluster_replicas_default": "true",
"enable_disk_cluster_replicas": "true",
"unsafe_enable_unorchestrated_cluster_replicas": "true",
"upsert_rocksdb_auto_spill_threshold_bytes": "250",
# Force backpressure to be enabled.
"storage_dataflow_max_inflight_bytes": f"{backpressure_bytes}",
Expand Down Expand Up @@ -670,23 +674,6 @@ def workflow_load_test(c: Composition, parser: WorkflowArgumentParser) -> None:
c.kill("materialized", "clusterd1")
print(f"Running rehydration for scenario {scenario_name}")
c.up("materialized", "clusterd1")
c.testdrive(
dedent(
f"""
> select sum(records_indexed)
from mz_internal.mz_source_statistics_raw st
join mz_sources s on s.id = st.id
where name = 's1';
{repeat + 2}
> select bool_and(rehydration_latency IS NOT NULL)
from mz_internal.mz_source_statistics_raw st
join mz_sources s on s.id = st.id
where name = 's1';
true
"""
)
)
# ensure we wait till the stat is updated
rehydration_latency = last_latency
while rehydration_latency == last_latency:
Expand Down

0 comments on commit 2e34c9d

Please sign in to comment.