Skip to content

Commit

Permalink
test: remove manual setting of Kafka metadata interval
Browse files Browse the repository at this point in the history
A bunch of tests set the `kafka_default_metadata_fetch_interval` or the
source `TOPIC METADATA REFRESH INTERVAL` to low values. This was done
because the default for `kafka_default_metadata_fetch_interval` was 30s
and some tests were slowed down by that. The default has now been
changed to 1s, so manually lowering the fetch interval is not necessary
anymore.

Removing the manual setting of the Kafka metadata interval in tests gets
rid of some noise. More importantly, it fixes the issue of some tests
setting the interval to a very low value, like '10ms'. Now that the
metadata fetch interval influences the source tick frequency, such low
values could negatively impact performance and make tests slower or even
time out.
  • Loading branch information
teskje committed Jan 9, 2025
1 parent 46657dc commit 4dde088
Show file tree
Hide file tree
Showing 25 changed files with 20 additions and 85 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def initialize(self) -> Testdrive:
$ kafka-ingest format=avro key-format=avro topic=multiple-partitions-topic key-schema=${keyschema} schema=${schema} repeat=100
{"key1": "A${kafka-ingest.iteration}"} {"f1": "A${kafka-ingest.iteration}"}
> CREATE SOURCE multiple_partitions_source_src FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-multiple-partitions-topic-${testdrive.seed}', TOPIC METADATA REFRESH INTERVAL '500ms');
> CREATE SOURCE multiple_partitions_source_src FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-multiple-partitions-topic-${testdrive.seed}');
> CREATE TABLE multiple_partitions_source FROM SOURCE multiple_partitions_source_src (REFERENCE "testdrive-multiple-partitions-topic-${testdrive.seed}")
FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE UPSERT;
Expand Down
1 change: 0 additions & 1 deletion test/cluster/github-7645/01-create-source.td
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
# by the Apache License, Version 2.0.

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
1 change: 0 additions & 1 deletion test/cluster/storage/01-create-sources.td
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET unsafe_enable_unorchestrated_cluster_replicas = true
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
5 changes: 1 addition & 4 deletions test/persistence/kafka-sources/partition-change-before.td
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,7 @@ $ kafka-ingest format=avro topic=partition-change key-format=avro key-schema=${k
);

> CREATE SOURCE partition_change
FROM KAFKA CONNECTION kafka_conn (
TOPIC 'testdrive-partition-change-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL = '100ms'
)
FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-partition-change-${testdrive.seed}')

> CREATE TABLE partition_change_tbl FROM SOURCE partition_change (REFERENCE "testdrive-partition-change-${testdrive.seed}")
FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
Expand Down
7 changes: 1 addition & 6 deletions test/source-sink-errors/mzcompose.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,13 +185,8 @@ def populate(self, c: Composition) -> None:
$ kafka-ingest topic=source-topic format=bytes
ABC
# Specify a faster metadata refresh interval so errors are detected every second
# instead of every minute
> CREATE SOURCE source1
FROM KAFKA CONNECTION kafka_conn (
TOPIC 'testdrive-source-topic-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '1s'
)
FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-source-topic-${testdrive.seed}')
> CREATE TABLE source1_tbl FROM SOURCE source1 (REFERENCE "testdrive-source-topic-${testdrive.seed}")
FORMAT BYTES
Expand Down
5 changes: 1 addition & 4 deletions test/ssh-connection/kafka-sink.td
Original file line number Diff line number Diff line change
Expand Up @@ -29,10 +29,7 @@ one
TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

> CREATE SOURCE non_ssh IN CLUSTER sc
FROM KAFKA CONNECTION kafka_conn_non_ssh (
TOPIC 'testdrive-thetopic-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '1s'
)
FROM KAFKA CONNECTION kafka_conn_non_ssh (TOPIC 'testdrive-thetopic-${testdrive.seed}')

> CREATE TABLE non_ssh_tbl FROM SOURCE non_ssh (REFERENCE "testdrive-thetopic-${testdrive.seed}")
FORMAT TEXT
Expand Down
12 changes: 2 additions & 10 deletions test/ssh-connection/kafka-source.td
Original file line number Diff line number Diff line change
Expand Up @@ -31,23 +31,15 @@ one
> CREATE CONNECTION kafka_conn_dynamic
TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT, SSH TUNNEL thancred);

# Create source with a faster metadata refresh interval so
# errors are detected every second instead of every minute.
> CREATE SOURCE fixed_text IN CLUSTER sc
FROM KAFKA CONNECTION kafka_conn_using (
TOPIC 'testdrive-thetopic-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '1s'
)
FROM KAFKA CONNECTION kafka_conn_using (TOPIC 'testdrive-thetopic-${testdrive.seed}')

> CREATE TABLE fixed_text_tbl FROM SOURCE fixed_text (REFERENCE "testdrive-thetopic-${testdrive.seed}")
FORMAT TEXT
ENVELOPE NONE

> CREATE SOURCE dynamic_text IN CLUSTER sc
FROM KAFKA CONNECTION kafka_conn_dynamic (
TOPIC 'testdrive-thetopic-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '1s'
)
FROM KAFKA CONNECTION kafka_conn_dynamic (TOPIC 'testdrive-thetopic-${testdrive.seed}')

> CREATE TABLE dynamic_text_tbl FROM SOURCE dynamic_text (REFERENCE "testdrive-thetopic-${testdrive.seed}")
FORMAT TEXT
Expand Down
2 changes: 0 additions & 2 deletions test/testdrive-old-kafka-src-syntax/kafka-avro-sources.td
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,6 @@ $ kafka-ingest format=avro topic=non-dbz-data-varying-partition schema=${non-dbz
IN CLUSTER non_dbz_data_varying_partition_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC 'testdrive-non-dbz-data-varying-partition-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL = '10ms',
START OFFSET=[1]
)
FORMAT AVRO USING SCHEMA '${non-dbz-schema}'
Expand Down Expand Up @@ -296,7 +295,6 @@ a b
IN CLUSTER non_dbz_data_varying_partition_2_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC 'testdrive-non-dbz-data-varying-partition-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL = '10ms',
START OFFSET=[1,1]
)
FORMAT AVRO USING SCHEMA '${non-dbz-schema}'
Expand Down
23 changes: 6 additions & 17 deletions test/testdrive-old-kafka-src-syntax/kafka-time-offset.td
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,6 @@ grape:grape
> CREATE SOURCE append_time_offset_1
IN CLUSTER append_time_offset_1_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=1,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand All @@ -94,7 +93,6 @@ grape:grape
> CREATE SOURCE append_time_offset_2
IN CLUSTER append_time_offset_2_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=2,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand All @@ -105,7 +103,6 @@ grape:grape
> CREATE SOURCE append_time_offset_3
IN CLUSTER append_time_offset_3_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=3,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand All @@ -116,7 +113,6 @@ grape:grape
> CREATE SOURCE append_time_offset_4
IN CLUSTER append_time_offset_4_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=4,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand All @@ -127,7 +123,6 @@ grape:grape
> CREATE SOURCE append_time_offset_5
IN CLUSTER append_time_offset_5_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=5,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand Down Expand Up @@ -222,8 +217,7 @@ grape:grape
IN CLUSTER upsert_time_offset_0_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP=0,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)
KEY FORMAT TEXT VALUE FORMAT TEXT
INCLUDE OFFSET
Expand All @@ -234,8 +228,7 @@ grape:grape
IN CLUSTER upsert_time_offset_1_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 1,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)
KEY FORMAT TEXT VALUE FORMAT TEXT
INCLUDE OFFSET
Expand All @@ -246,8 +239,7 @@ grape:grape
IN CLUSTER upsert_time_offset_2_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 2,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)
KEY FORMAT TEXT VALUE FORMAT TEXT
INCLUDE OFFSET
Expand All @@ -258,8 +250,7 @@ grape:grape
IN CLUSTER upsert_time_offset_3_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 3,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)
KEY FORMAT TEXT VALUE FORMAT TEXT
INCLUDE OFFSET
Expand All @@ -270,8 +261,7 @@ grape:grape
IN CLUSTER upsert_time_offset_4_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 4,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)
KEY FORMAT TEXT VALUE FORMAT TEXT
INCLUDE OFFSET
Expand All @@ -282,8 +272,7 @@ grape:grape
IN CLUSTER upsert_time_offset_5_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 5,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)
KEY FORMAT TEXT VALUE FORMAT TEXT
INCLUDE OFFSET
Expand Down
1 change: 0 additions & 1 deletion test/testdrive-old-kafka-src-syntax/pr-24663-regression.td
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
$ set-arg-default default-storage-size=1

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
$ set-arg-default default-replica-size=1

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
1 change: 0 additions & 1 deletion test/testdrive-old-kafka-src-syntax/source-statistics.td
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
$ set-arg-default single-replica-cluster=quickstart

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET unsafe_enable_unorchestrated_cluster_replicas = true
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
$ set-arg-default single-replica-cluster=quickstart

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,7 @@ $ kafka-create-topic topic=data2 partitions=2

> CREATE SOURCE data_rt
IN CLUSTER ${arg.single-replica-cluster}
FROM KAFKA CONNECTION kafka_conn (
TOPIC 'testdrive-data-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL = '50ms'
)
FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-data-${testdrive.seed}')
FORMAT AVRO USING SCHEMA '${schema}'

> CREATE MATERIALIZED VIEW view_rt AS SELECT b, sum(a) FROM data_rt GROUP BY b
Expand Down
2 changes: 0 additions & 2 deletions test/testdrive/kafka-avro-sources.td
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,6 @@ $ kafka-ingest format=avro topic=non-dbz-data-varying-partition schema=${non-dbz
IN CLUSTER non_dbz_data_varying_partition_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC 'testdrive-non-dbz-data-varying-partition-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL = '10ms',
START OFFSET=[1]
)

Expand Down Expand Up @@ -314,7 +313,6 @@ a b
IN CLUSTER non_dbz_data_varying_partition_2_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC 'testdrive-non-dbz-data-varying-partition-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL = '10ms',
START OFFSET=[1,1]
)

Expand Down
1 change: 0 additions & 1 deletion test/testdrive/kafka-sink-statistics.td
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
$ set-arg-default single-replica-cluster=quickstart

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000
ALTER SYSTEM SET kafka_socket_timeout = 5000
Expand Down
23 changes: 6 additions & 17 deletions test/testdrive/kafka-time-offset.td
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,6 @@ grape:grape
> CREATE SOURCE append_time_offset_1
IN CLUSTER append_time_offset_1_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=1,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand All @@ -92,7 +91,6 @@ grape:grape
> CREATE SOURCE append_time_offset_2
IN CLUSTER append_time_offset_2_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=2,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand All @@ -105,7 +103,6 @@ grape:grape
> CREATE SOURCE append_time_offset_3
IN CLUSTER append_time_offset_3_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=3,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand All @@ -118,7 +115,6 @@ grape:grape
> CREATE SOURCE append_time_offset_4
IN CLUSTER append_time_offset_4_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=4,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand All @@ -131,7 +127,6 @@ grape:grape
> CREATE SOURCE append_time_offset_5
IN CLUSTER append_time_offset_5_cluster
FROM KAFKA CONNECTION kafka_conn (
TOPIC METADATA REFRESH INTERVAL = '10ms',
START TIMESTAMP=5,
TOPIC 'testdrive-t1-${testdrive.seed}'
)
Expand Down Expand Up @@ -228,8 +223,7 @@ grape:grape
IN CLUSTER upsert_time_offset_0_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP=0,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)

> CREATE TABLE upsert_time_offset_0_tbl FROM SOURCE upsert_time_offset_0 (REFERENCE "testdrive-t2-${testdrive.seed}")
Expand All @@ -242,8 +236,7 @@ grape:grape
IN CLUSTER upsert_time_offset_1_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 1,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)

> CREATE TABLE upsert_time_offset_1_tbl FROM SOURCE upsert_time_offset_1 (REFERENCE "testdrive-t2-${testdrive.seed}")
Expand All @@ -256,8 +249,7 @@ grape:grape
IN CLUSTER upsert_time_offset_2_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 2,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)

> CREATE TABLE upsert_time_offset_2_tbl FROM SOURCE upsert_time_offset_2 (REFERENCE "testdrive-t2-${testdrive.seed}")
Expand All @@ -270,8 +262,7 @@ grape:grape
IN CLUSTER upsert_time_offset_3_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 3,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)

> CREATE TABLE upsert_time_offset_3_tbl FROM SOURCE upsert_time_offset_3 (REFERENCE "testdrive-t2-${testdrive.seed}")
Expand All @@ -284,8 +275,7 @@ grape:grape
IN CLUSTER upsert_time_offset_4_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 4,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)

> CREATE TABLE upsert_time_offset_4_tbl FROM SOURCE upsert_time_offset_4 (REFERENCE "testdrive-t2-${testdrive.seed}")
Expand All @@ -298,8 +288,7 @@ grape:grape
IN CLUSTER upsert_time_offset_5_cluster
FROM KAFKA CONNECTION kafka_conn (
START TIMESTAMP 5,
TOPIC 'testdrive-t2-${testdrive.seed}',
TOPIC METADATA REFRESH INTERVAL '10ms'
TOPIC 'testdrive-t2-${testdrive.seed}'
)

> CREATE TABLE upsert_time_offset_5_tbl FROM SOURCE upsert_time_offset_5 (REFERENCE "testdrive-t2-${testdrive.seed}")
Expand Down
1 change: 0 additions & 1 deletion test/testdrive/pr-24663-regression.td
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
$ set-arg-default default-storage-size=1

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
1 change: 0 additions & 1 deletion test/testdrive/snapshot-source-statistics.td
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
$ set-arg-default default-replica-size=1

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET kafka_default_metadata_fetch_interval = 1000
ALTER SYSTEM SET storage_statistics_collection_interval = 1000
ALTER SYSTEM SET storage_statistics_interval = 2000

Expand Down
Loading

0 comments on commit 4dde088

Please sign in to comment.