From a279c678d33ccfaa46d2265f32600ec5103ad1f3 Mon Sep 17 00:00:00 2001 From: iliax Date: Tue, 21 Mar 2023 11:50:57 +0400 Subject: [PATCH 01/16] wip --- .../main/resources/swagger/kafka-ui-api.yaml | 138 ++++++++++++++++++ 1 file changed, 138 insertions(+) diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index 71c595e5251..26831c73b33 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -744,6 +744,121 @@ paths: 404: description: Not found + /api/clusters/{clusterName}/topics/{topicName}/smartfilters: + post: + tags: + - Messages + summary: registerFilter + operationId: registerFilter + parameters: + - name: clusterName + in: path + required: true + schema: + type: string + - name: topicName + in: path + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MessageFilterRegistration' + responses: + 200: + description: OK + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/MessageFilterId' + + + /api/clusters/{clusterName}/topics/{topicName}/messages/v2: + get: + tags: + - Messages + summary: getTopicMessagesV2 + operationId: getTopicMessagesV2 + parameters: + - name: clusterName + in: path + required: true + schema: + type: string + - name: topicName + in: path + required: true + schema: + type: string + - name: m + in: query + description: Messages polling mode + schema: + $ref: "#/components/schemas/PollingMode" + - name: p + in: query + schema: + type: array + description: List of target partitions( all partitions if not provided) + items: + type: integer + - name: lim + in: query + description: Max number of messages can be returned + schema: + type: integer + - name: q + in: query + description: query string to contains string filtration + schema: + type: string + - name: fid + in: query + description: filter id, that was registered beforehand + schema: + type: string + - name: off + in: query + description: offset to read from / to + schema: + type: integer + format: int64 + - name: offs + in: query + description: partition offsets to read from / to. Format is "p1:off1,p2:off2,..." + schema: + type: integer + format: int64 + - name: ts + in: query + description: timestamp (in ms) to read from / to + schema: + type: integer + format: int64 + - name: ks + in: query + description: "Serde that should be used for deserialization. Will be chosen automatically if not set." + schema: + type: string + - name: vs + in: query + description: "Serde that should be used for deserialization. Will be chosen automatically if not set." 
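          # Note: ks selects the key serde and vs the value serde; both fall back to
          # automatic detection when omitted. An illustrative polling request (host,
          # cluster and topic names are examples only) could be:
          #   curl -N "http://localhost:8080/api/clusters/local/topics/test/messages/v2?m=LATEST&lim=10"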
+ schema: + type: string + responses: + 200: + description: OK + content: + text/event-stream: + schema: + type: array + items: + $ref: '#/components/schemas/TopicMessageEvent' + /api/clusters/{clusterName}/topics/{topicName}/consumer-groups: get: tags: @@ -2569,6 +2684,29 @@ components: - TIMESTAMP - LATEST + MessageFilterRegistration: + type: object + properties: + filterCode: + type: string + + MessageFilterId: + type: object + properties: + id: + type: string + + PollingMode: + type: string + enum: + - FROM_OFFSET + - TO_OFFSET + - FROM_TIMESTAMP + - TO_TIMESTAMP + - LATEST + - FIRST + - TAILING + MessageFilterType: type: string enum: From 4be46ec520cd97f823d01abe3893ab1261b8b35c Mon Sep 17 00:00:00 2001 From: iliax Date: Tue, 21 Mar 2023 19:13:01 +0400 Subject: [PATCH 02/16] wip --- documentation/compose/kafka-ui.yaml | 13 ++-- .../ui/controller/MessagesController.java | 50 +++++++++++++++ .../kafka/ui/service/MessagesService.java | 61 +++++++++++++++++++ .../main/resources/swagger/kafka-ui-api.yaml | 20 +++--- 4 files changed, 127 insertions(+), 17 deletions(-) diff --git a/documentation/compose/kafka-ui.yaml b/documentation/compose/kafka-ui.yaml index 8524f6fa2ba..d4f08e9564c 100644 --- a/documentation/compose/kafka-ui.yaml +++ b/documentation/compose/kafka-ui.yaml @@ -20,11 +20,6 @@ services: KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085 KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083 - KAFKA_CLUSTERS_1_NAME: secondLocal - KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092 - KAFKA_CLUSTERS_1_METRICS_PORT: 9998 - KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085 - DYNAMIC_CONFIG_ENABLED: 'true' kafka0: image: confluentinc/cp-kafka:7.2.1 @@ -45,7 +40,7 @@ services: KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997 KAFKA_PROCESS_ROLES: 'broker,controller' KAFKA_NODE_ID: 1 - KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093' + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093,2@kafka1:29093' KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092' KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' @@ -62,7 +57,7 @@ services: - "9093:9092" - "9998:9998" environment: - KAFKA_BROKER_ID: 1 + KAFKA_BROKER_ID: 2 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT' KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092' KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 @@ -72,8 +67,8 @@ services: KAFKA_JMX_PORT: 9998 KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9998 KAFKA_PROCESS_ROLES: 'broker,controller' - KAFKA_NODE_ID: 1 - KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093' + KAFKA_NODE_ID: 2 + KAFKA_CONTROLLER_QUORUM_VOTERS: '2@kafka1:29093,1@kafka0:29093' KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092' KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java 
b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java index 1ba511ab07c..9fa53beff1f 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -11,7 +11,10 @@ import com.provectus.kafka.ui.exception.ValidationException; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; +import com.provectus.kafka.ui.model.MessageFilterIdDTO; +import com.provectus.kafka.ui.model.MessageFilterRegistrationDTO; import com.provectus.kafka.ui.model.MessageFilterTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.SeekDirectionDTO; import com.provectus.kafka.ui.model.SeekTypeDTO; import com.provectus.kafka.ui.model.SerdeUsageDTO; @@ -70,6 +73,7 @@ public Mono> deleteTopicMessages( ); } + @Deprecated @Override public Mono>> getTopicMessages(String clusterName, String topicName, @@ -182,4 +186,50 @@ public Mono> getSerdes(String clusterNam .map(ResponseEntity::ok) ); } + + + @Override + public Mono>> getTopicMessagesV2(String clusterName, String topicName, + PollingModeDTO mode, + @Nullable List partitions, + @Nullable Integer limit, + @Nullable String query, + @Nullable String filterId, + @Nullable String offsetString, + @Nullable Long ts, + @Nullable String ks, + @Nullable String vs, + ServerWebExchange exchange) { + final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + .cluster(clusterName) + .topic(topicName) + .topicActions(MESSAGES_READ) + .build()); + + int recordsLimit = + Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT); + + return validateAccess.then( + Mono.just( + ResponseEntity.ok( + messagesService.loadMessagesV2( + getCluster(clusterName), topicName, positions, q, filterQueryType, + recordsLimit, seekDirection, keySerde, valueSerde) + ) + ) + ); + } + + interface PollingMode { + static PollingMode create(PollingModeDTO mode, @Nullable String offsetString, @Nullable Long timestamp) { + return null; + } + } + + @Override + public Mono>> registerFilter(String clusterName, String topicName, + Mono messageFilterRegistrationDTO, + ServerWebExchange exchange) { + return null; + } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index d1f0e261a8c..b92f0b6f2a9 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -12,6 +12,7 @@ import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; import com.provectus.kafka.ui.model.MessageFilterTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.SeekDirectionDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serde.api.Serde; @@ -196,6 +197,66 @@ private Flux loadMessagesImpl(KafkaCluster cluster, .map(throttleUiPublish(seekDirection)); } + public Flux loadMessagesV2(KafkaCluster cluster, + String topic, + PollingModeDTO pollingMode, + @Nullable String query, + @Nullable String filterId, + int limit, + @Nullable String keySerde, + @Nullable String valueSerde) { + return withExistingTopic(cluster, topic) + .flux() + 
.publishOn(Schedulers.boundedElastic()) + .flatMap(td -> loadMessagesImplV2(cluster, topic, consumerPosition, query, + filterQueryType, limit, seekDirection, keySerde, valueSerde)); + } + + private Flux loadMessagesImplV2(KafkaCluster cluster, + String topic, + ConsumerPosition consumerPosition, + @Nullable String query, + MessageFilterTypeDTO filterQueryType, + int limit, + SeekDirectionDTO seekDirection, + @Nullable String keySerde, + @Nullable String valueSerde) { + + java.util.function.Consumer> emitter; + ConsumerRecordDeserializer recordDeserializer = + deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); + if (seekDirection.equals(SeekDirectionDTO.FORWARD)) { + emitter = new ForwardRecordEmitter( + () -> consumerGroupService.createConsumer(cluster), + consumerPosition, + recordDeserializer, + cluster.getThrottler().get() + ); + } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) { + emitter = new BackwardRecordEmitter( + () -> consumerGroupService.createConsumer(cluster), + consumerPosition, + limit, + recordDeserializer, + cluster.getThrottler().get() + ); + } else { + emitter = new TailingEmitter( + () -> consumerGroupService.createConsumer(cluster), + consumerPosition, + recordDeserializer, + cluster.getThrottler().get() + ); + } + MessageFilterStats filterStats = new MessageFilterStats(); + return Flux.create(emitter) + .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats)) + .filter(getMsgFilter(query, filterQueryType, filterStats)) + .map(getDataMasker(cluster, topic)) + .takeWhile(createTakeWhilePredicate(seekDirection, limit)) + .map(throttleUiPublish(seekDirection)); + } + private Predicate createTakeWhilePredicate( SeekDirectionDTO seekDirection, int limit) { return seekDirection == SeekDirectionDTO.TAILING diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index 26831c73b33..ca85d018979 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -797,6 +797,7 @@ paths: - name: m in: query description: Messages polling mode + required: true schema: $ref: "#/components/schemas/PollingMode" - name: p @@ -821,18 +822,11 @@ paths: description: filter id, that was registered beforehand schema: type: string - - name: off - in: query - description: offset to read from / to - schema: - type: integer - format: int64 - name: offs in: query description: partition offsets to read from / to. Format is "p1:off1,p2:off2,..." 
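          # e.g. offs=0:1500,1:1421 means partition 0 at offset 1500 and partition 1
          # at offset 1421; whether these act as "from" or "to" bounds depends on the
          # polling mode (FROM_OFFSET vs TO_OFFSET)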
schema: - type: integer - format: int64 + type: string - name: ts in: query description: timestamp (in ms) to read from / to @@ -2576,6 +2570,7 @@ components: - MESSAGE - CONSUMING - DONE + - CURSOR - EMIT_THROTTLING message: $ref: "#/components/schemas/TopicMessage" @@ -2583,6 +2578,8 @@ components: $ref: "#/components/schemas/TopicMessagePhase" consuming: $ref: "#/components/schemas/TopicMessageConsuming" + cursor: + $ref: "#/components/schemas/TopicMessageNextPageCursor" TopicMessagePhase: type: object @@ -2612,6 +2609,13 @@ components: filterApplyErrors: type: integer + TopicMessageNextPageCursor: + type: object + properties: + offsetsString: + type: string + pollingMode: + $ref: "#/components/schemas/PollingMode" TopicMessage: type: object From 306c1fb1b77b2257f291db6da2250c5ac63c5a58 Mon Sep 17 00:00:00 2001 From: iliax Date: Wed, 22 Mar 2023 23:18:05 +0400 Subject: [PATCH 03/16] wip --- .../ui/controller/MessagesController.java | 96 +++--------- .../kafka/ui/emitter/AbstractEmitter.java | 2 +- .../ui/emitter/BackwardRecordEmitter.java | 4 +- .../ui/emitter/ForwardRecordEmitter.java | 3 +- .../kafka/ui/emitter/MessageFilters.java | 16 +- .../kafka/ui/emitter/SeekOperations.java | 76 ++++++---- .../kafka/ui/emitter/TailingEmitter.java | 3 +- .../kafka/ui/model/ConsumerPosition.java | 90 ++++++++++- .../kafka/ui/service/MessagesService.java | 143 ++++++------------ .../kafka/ui/KafkaConsumerTests.java | 2 +- .../kafka/ui/emitter/SeekOperationsTest.java | 36 +++-- .../kafka/ui/emitter/TailingEmitterTest.java | 16 +- .../kafka/ui/service/MessagesServiceTest.java | 12 +- .../kafka/ui/service/RecordEmitterTest.java | 80 +++++----- .../kafka/ui/service/SendAndReadTests.java | 13 +- .../main/resources/swagger/kafka-ui-api.yaml | 11 +- 16 files changed, 289 insertions(+), 314 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java index 9fa53beff1f..88cf0746bb6 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -5,10 +5,8 @@ import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_READ; import static com.provectus.kafka.ui.serde.api.Serde.Target.KEY; import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE; -import static java.util.stream.Collectors.toMap; import com.provectus.kafka.ui.api.MessagesApi; -import com.provectus.kafka.ui.exception.ValidationException; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.MessageFilterIdDTO; @@ -26,14 +24,11 @@ import com.provectus.kafka.ui.service.MessagesService; import com.provectus.kafka.ui.service.rbac.AccessControlService; import java.util.List; -import java.util.Map; import java.util.Optional; import javax.annotation.Nullable; import javax.validation.Valid; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.kafka.common.TopicPartition; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.server.ServerWebExchange; @@ -86,32 +81,7 @@ public Mono>> getTopicMessages(String String keySerde, String valueSerde, ServerWebExchange exchange) { - final Mono validateAccess = 
accessControlService.validateAccess(AccessContext.builder() - .cluster(clusterName) - .topic(topicName) - .topicActions(MESSAGES_READ) - .build()); - - seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING; - seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD; - filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS; - int recordsLimit = - Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT); - - var positions = new ConsumerPosition( - seekType, - topicName, - parseSeekTo(topicName, seekType, seekTo) - ); - Mono>> job = Mono.just( - ResponseEntity.ok( - messagesService.loadMessages( - getCluster(clusterName), topicName, positions, q, filterQueryType, - recordsLimit, seekDirection, keySerde, valueSerde) - ) - ); - - return validateAccess.then(job); + throw new IllegalStateException(); } @Override @@ -132,34 +102,6 @@ public Mono> sendTopicMessages( ); } - /** - * The format is [partition]::[offset] for specifying offsets - * or [partition]::[timestamp in millis] for specifying timestamps. - */ - @Nullable - private Map parseSeekTo(String topic, SeekTypeDTO seekType, List seekTo) { - if (seekTo == null || seekTo.isEmpty()) { - if (seekType == SeekTypeDTO.LATEST || seekType == SeekTypeDTO.BEGINNING) { - return null; - } - throw new ValidationException("seekTo should be set if seekType is " + seekType); - } - return seekTo.stream() - .map(p -> { - String[] split = p.split("::"); - if (split.length != 2) { - throw new IllegalArgumentException( - "Wrong seekTo argument format. See API docs for details"); - } - - return Pair.of( - new TopicPartition(topic, Integer.parseInt(split[0])), - Long.parseLong(split[1]) - ); - }) - .collect(toMap(Pair::getKey, Pair::getValue)); - } - @Override public Mono> getSerdes(String clusterName, String topicName, @@ -197,8 +139,8 @@ public Mono>> getTopicMessagesV2(Strin @Nullable String filterId, @Nullable String offsetString, @Nullable Long ts, - @Nullable String ks, - @Nullable String vs, + @Nullable String keySerde, + @Nullable String valueSerde, ServerWebExchange exchange) { final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() .cluster(clusterName) @@ -206,6 +148,8 @@ public Mono>> getTopicMessagesV2(Strin .topicActions(MESSAGES_READ) .build()); + ConsumerPosition consumerPosition = ConsumerPosition.create(mode, topicName, partitions, ts, offsetString); + int recordsLimit = Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT); @@ -213,23 +157,25 @@ public Mono>> getTopicMessagesV2(Strin Mono.just( ResponseEntity.ok( messagesService.loadMessagesV2( - getCluster(clusterName), topicName, positions, q, filterQueryType, - recordsLimit, seekDirection, keySerde, valueSerde) - ) - ) - ); + getCluster(clusterName), topicName, consumerPosition, + query, filterId, recordsLimit, keySerde, valueSerde)))); } - interface PollingMode { - static PollingMode create(PollingModeDTO mode, @Nullable String offsetString, @Nullable Long timestamp) { - return null; - } - } @Override - public Mono>> registerFilter(String clusterName, String topicName, - Mono messageFilterRegistrationDTO, - ServerWebExchange exchange) { - return null; + public Mono> registerFilter(String clusterName, + String topicName, + Mono registration, + ServerWebExchange exchange) { + + final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + 
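+        // registration is guarded by the same MESSAGES_READ permission as polling,
+        // since a registered Groovy filter is only ever applied on the read path;
+        // e.g. POST .../smartfilters with {"filterCode": "..."} returns {"id": "..."},
+        // and that id is later passed as the fid query param of .../messages/v2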
.cluster(clusterName) + .topic(topicName) + .topicActions(MESSAGES_READ) + .build()); + + return validateAccess.then(registration) + .map(reg -> messagesService.registerMessageFilter(reg.getFilterCode())) + .map(id -> ResponseEntity.ok(new MessageFilterIdDTO().id(id))); } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java index 646cf81ca67..5dfe1b85a1b 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java @@ -12,7 +12,7 @@ import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; -public abstract class AbstractEmitter { +public abstract class AbstractEmitter implements java.util.function.Consumer> { private final ConsumerRecordDeserializer recordDeserializer; private final ConsumingStats consumingStats = new ConsumingStats(); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java index 42f94a1e019..d2ab8773e77 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java @@ -19,9 +19,7 @@ import reactor.core.publisher.FluxSink; @Slf4j -public class BackwardRecordEmitter - extends AbstractEmitter - implements java.util.function.Consumer> { +public class BackwardRecordEmitter extends AbstractEmitter { private final Supplier> consumerSupplier; private final ConsumerPosition consumerPosition; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java index 971e2f7c9c4..d60d99d76cf 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java @@ -14,8 +14,7 @@ @Slf4j public class ForwardRecordEmitter - extends AbstractEmitter - implements java.util.function.Consumer> { + extends AbstractEmitter { private final Supplier> consumerSupplier; private final ConsumerPosition position; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java index e48501f6a75..28a1b20b178 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java @@ -1,7 +1,6 @@ package com.provectus.kafka.ui.emitter; import com.provectus.kafka.ui.exception.ValidationException; -import com.provectus.kafka.ui.model.MessageFilterTypeDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import groovy.json.JsonSlurper; import java.util.function.Predicate; @@ -22,23 +21,12 @@ public class MessageFilters { private MessageFilters() { } - public static Predicate createMsgFilter(String query, MessageFilterTypeDTO type) { - switch (type) { - case STRING_CONTAINS: - return containsStringFilter(query); - case GROOVY_SCRIPT: - return groovyScriptFilter(query); - default: - throw new IllegalStateException("Unknown query type: " + type); - } - } - - static Predicate containsStringFilter(String string) { + public static Predicate containsStringFilter(String 
string) { return msg -> StringUtils.contains(msg.getKey(), string) || StringUtils.contains(msg.getContent(), string); } - static Predicate groovyScriptFilter(String script) { + public static Predicate groovyScriptFilter(String script) { var compiledScript = compileScript(script); var jsonSlurper = new JsonSlurper(); return new Predicate() { diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java index 014b1207572..3d1d02345a7 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java @@ -1,13 +1,13 @@ package com.provectus.kafka.ui.emitter; +import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.provectus.kafka.ui.model.ConsumerPosition; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import java.util.HashMap; import java.util.Map; -import java.util.stream.Collectors; -import javax.annotation.Nullable; import lombok.AccessLevel; import lombok.RequiredArgsConstructor; import org.apache.kafka.clients.consumer.Consumer; @@ -22,15 +22,15 @@ class SeekOperations { static SeekOperations create(Consumer consumer, ConsumerPosition consumerPosition) { OffsetsInfo offsetsInfo; - if (consumerPosition.getSeekTo() == null) { - offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic()); + if (consumerPosition.partitions().isEmpty()) { + offsetsInfo = new OffsetsInfo(consumer, consumerPosition.topic()); } else { - offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getSeekTo().keySet()); + offsetsInfo = new OffsetsInfo(consumer, consumerPosition.partitions()); } return new SeekOperations( consumer, offsetsInfo, - getOffsetsForSeek(consumer, offsetsInfo, consumerPosition.getSeekType(), consumerPosition.getSeekTo()) + getOffsetsForSeek(consumer, offsetsInfo, consumerPosition) ); } @@ -61,28 +61,34 @@ Map getOffsetsForSeek() { */ @VisibleForTesting static Map getOffsetsForSeek(Consumer consumer, - OffsetsInfo offsetsInfo, - SeekTypeDTO seekType, - @Nullable Map seekTo) { - switch (seekType) { - case LATEST: + OffsetsInfo offsetsInfo, + ConsumerPosition position) { + switch (position.pollingMode()) { + case LATEST, TAILING: return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions()); - case BEGINNING: + case EARLIEST: return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions()); - case OFFSET: - Preconditions.checkNotNull(offsetsInfo); - return fixOffsets(offsetsInfo, seekTo); - case TIMESTAMP: - Preconditions.checkNotNull(offsetsInfo); - return offsetsForTimestamp(consumer, offsetsInfo, seekTo); + case FROM_OFFSET, TO_OFFSET: + Preconditions.checkNotNull(position.offsets()); + return fixOffsets(offsetsInfo, position.offsets()); + case FROM_TIMESTAMP, TO_TIMESTAMP: + Preconditions.checkNotNull(position.timestamp()); + return offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, position.timestamp()); default: throw new IllegalStateException(); } } - private static Map fixOffsets(OffsetsInfo offsetsInfo, Map offsets) { - offsets = new HashMap<>(offsets); - offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); + private static Map fixOffsets(OffsetsInfo offsetsInfo, + ConsumerPosition.Offsets positionOffset) { + var offsets = new HashMap(); + if 
(positionOffset.offset() != null) { + offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset())); + } else { + Preconditions.checkNotNull(positionOffset.tpOffsets()); + offsets.putAll(positionOffset.tpOffsets()); + offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); + } Map result = new HashMap<>(); offsets.forEach((tp, targetOffset) -> { @@ -99,13 +105,25 @@ private static Map fixOffsets(OffsetsInfo offsetsInfo, Map return result; } - private static Map offsetsForTimestamp(Consumer consumer, OffsetsInfo offsetsInfo, - Map timestamps) { - timestamps = new HashMap<>(timestamps); - timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); + private static Map offsetsForTimestamp(Consumer consumer, + PollingModeDTO pollingMode, + OffsetsInfo offsetsInfo, + Long timestamp) { + Map timestamps = new HashMap<>(); + offsetsInfo.getNonEmptyPartitions().forEach(tp -> timestamps.put(tp, timestamp)); - return consumer.offsetsForTimes(timestamps).entrySet().stream() - .filter(e -> e.getValue() != null) - .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset())); + Map result = new HashMap<>(); + consumer.offsetsForTimes(timestamps).forEach((tp, offsetAndTimestamp) -> { + if (offsetAndTimestamp == null) { + if (pollingMode == TO_TIMESTAMP && offsetsInfo.getNonEmptyPartitions().contains(tp)) { + // if no offset was returned this means that *all* timestamps are lower + // than target timestamp. Is case of TO_OFFSET mode we need to read from the ending of tp + result.put(tp, offsetsInfo.getEndOffsets().get(tp)); + } + } else { + result.put(tp, offsetAndTimestamp.offset()); + } + }); + return result; } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java index 4554069c1c9..458f2d77a64 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java @@ -12,8 +12,7 @@ import reactor.core.publisher.FluxSink; @Slf4j -public class TailingEmitter extends AbstractEmitter - implements java.util.function.Consumer> { +public class TailingEmitter extends AbstractEmitter { private final Supplier> consumerSupplier; private final ConsumerPosition consumerPosition; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java index 9d77923fbc6..6bc71edb5f2 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java @@ -1,14 +1,90 @@ package com.provectus.kafka.ui.model; +import static java.util.stream.Collectors.toMap; + +import com.provectus.kafka.ui.exception.ValidationException; +import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.annotation.Nullable; -import lombok.Value; +import org.apache.commons.lang3.tuple.Pair; import org.apache.kafka.common.TopicPartition; +import org.springframework.util.StringUtils; + + +public record ConsumerPosition(PollingModeDTO pollingMode, + String topic, + List partitions, //all partitions if list is empty + @Nullable Long timestamp, + @Nullable Offsets offsets) { + + public record Offsets(@Nullable Long offset, + @Nullable Map tpOffsets) { + } + + public static 
ConsumerPosition create(PollingModeDTO pollingMode, + String topic, + @Nullable List partitions, + @Nullable Long timestamp, + @Nullable String offsetsStr) { + Offsets offsets = parseAndValidateOffsets(pollingMode, topic, offsetsStr); + + var topicPartitions = Optional.ofNullable(partitions).orElse(List.of()) + .stream() + .map(p -> new TopicPartition(topic, p)) + .collect(Collectors.toList()); + + // if offsets are specified -inferring partitions list from there + topicPartitions = offsets.tpOffsets == null ? topicPartitions : List.copyOf(offsets.tpOffsets.keySet()); + + return new ConsumerPosition( + pollingMode, + topic, + Optional.ofNullable(topicPartitions).orElse(List.of()), + validateTimestamp(pollingMode, timestamp), + offsets + ); + } + + private static Long validateTimestamp(PollingModeDTO pollingMode, @Nullable Long ts) { + if (pollingMode == PollingModeDTO.FROM_TIMESTAMP || pollingMode == PollingModeDTO.TO_TIMESTAMP) { + if (ts == null) { + throw new ValidationException("timestamp not provided for " + pollingMode); + } + } + return ts; + } + + private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode, + String topic, + @Nullable String offsetsStr) { + Offsets offsets = null; + if (pollingMode == PollingModeDTO.FROM_OFFSET || pollingMode == PollingModeDTO.TO_OFFSET) { + if (!StringUtils.hasText(offsetsStr)) { + throw new ValidationException("offsets not provided for " + pollingMode); + } + if (offsetsStr.contains(":")) { + offsets = new Offsets(Long.parseLong(offsetsStr), null); + } else { + Map tpOffsets = Stream.of(offsetsStr.split(",")) + .map(p -> { + String[] split = p.split(":"); + if (split.length != 2) { + throw new IllegalArgumentException( + "Wrong seekTo argument format. See API docs for details"); + } + return Pair.of( + new TopicPartition(topic, Integer.parseInt(split[0])), + Long.parseLong(split[1]) + ); + }) + .collect(toMap(Pair::getKey, Pair::getValue)); + offsets = new Offsets(null, tpOffsets); + } + } + return offsets; + } -@Value -public class ConsumerPosition { - SeekTypeDTO seekType; - String topic; - @Nullable - Map seekTo; // null if positioning should apply to all tps } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 0b4c2c95517..722f0997217 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -12,9 +12,8 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.MessageFilterTypeDTO; import com.provectus.kafka.ui.model.PollingModeDTO; -import com.provectus.kafka.ui.model.SeekDirectionDTO; +import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serde.api.Serde; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; @@ -23,14 +22,16 @@ import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Random; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Predicate; import java.util.function.UnaryOperator; import java.util.stream.Collectors; import javax.annotation.Nullable; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import 
org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.kafka.clients.admin.OffsetSpec; import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.producer.KafkaProducer; @@ -41,7 +42,6 @@ import org.apache.kafka.common.serialization.ByteArraySerializer; import org.springframework.stereotype.Service; import reactor.core.publisher.Flux; -import reactor.core.publisher.FluxSink; import reactor.core.publisher.Mono; import reactor.core.scheduler.Schedulers; @@ -57,6 +57,8 @@ public class MessagesService { private final DeserializationService deserializationService; private final ConsumerGroupService consumerGroupService; + private final Map> registeredFilters = new ConcurrentHashMap<>(); + private Mono withExistingTopic(KafkaCluster cluster, String topicName) { return adminClientService.get(cluster) .flatMap(client -> client.describeTopic(topicName)) @@ -137,69 +139,9 @@ private Mono sendMessageImpl(KafkaCluster cluster, } } - public Flux loadMessages(KafkaCluster cluster, String topic, - ConsumerPosition consumerPosition, - @Nullable String query, - MessageFilterTypeDTO filterQueryType, - int limit, - SeekDirectionDTO seekDirection, - @Nullable String keySerde, - @Nullable String valueSerde) { - return withExistingTopic(cluster, topic) - .flux() - .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query, - filterQueryType, limit, seekDirection, keySerde, valueSerde)); - } - - private Flux loadMessagesImpl(KafkaCluster cluster, - String topic, - ConsumerPosition consumerPosition, - @Nullable String query, - MessageFilterTypeDTO filterQueryType, - int limit, - SeekDirectionDTO seekDirection, - @Nullable String keySerde, - @Nullable String valueSerde) { - - java.util.function.Consumer> emitter; - ConsumerRecordDeserializer recordDeserializer = - deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); - if (seekDirection.equals(SeekDirectionDTO.FORWARD)) { - emitter = new ForwardRecordEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - recordDeserializer, - cluster.getPollingSettings() - ); - } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) { - emitter = new BackwardRecordEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - limit, - recordDeserializer, - cluster.getPollingSettings() - ); - } else { - emitter = new TailingEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - recordDeserializer, - cluster.getPollingSettings() - ); - } - MessageFilterStats filterStats = new MessageFilterStats(); - return Flux.create(emitter) - .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats)) - .filter(getMsgFilter(query, filterQueryType, filterStats)) - .map(getDataMasker(cluster, topic)) - .takeWhile(createTakeWhilePredicate(seekDirection, limit)) - .map(throttleUiPublish(seekDirection)); - } - public Flux loadMessagesV2(KafkaCluster cluster, String topic, - PollingModeDTO pollingMode, + ConsumerPosition position, @Nullable String query, @Nullable String filterId, int limit, @@ -208,58 +150,55 @@ public Flux loadMessagesV2(KafkaCluster cluster, return withExistingTopic(cluster, topic) .flux() .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImplV2(cluster, topic, consumerPosition, query, - filterQueryType, limit, seekDirection, keySerde, valueSerde)); + .flatMap(td -> loadMessagesImplV2(cluster, topic, position, 
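+            // fid must reference a filter previously registered via registerMessageFilter;
+            // getMsgFilter() rejects unknown ids with a ValidationException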
query, filterId, limit, keySerde, valueSerde)); } private Flux loadMessagesImplV2(KafkaCluster cluster, String topic, ConsumerPosition consumerPosition, @Nullable String query, - MessageFilterTypeDTO filterQueryType, + @Nullable String filterId, int limit, - SeekDirectionDTO seekDirection, @Nullable String keySerde, @Nullable String valueSerde) { - java.util.function.Consumer> emitter; ConsumerRecordDeserializer recordDeserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); - if (seekDirection.equals(SeekDirectionDTO.FORWARD)) { - emitter = new ForwardRecordEmitter( + + var emitter = switch (consumerPosition.pollingMode()) { + case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardRecordEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, + limit, recordDeserializer, - cluster.getThrottler().get() + cluster.getPollingSettings() ); - } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) { - emitter = new BackwardRecordEmitter( + case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardRecordEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, - limit, recordDeserializer, - cluster.getThrottler().get() + cluster.getPollingSettings() ); - } else { - emitter = new TailingEmitter( + case TAILING -> new TailingEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, recordDeserializer, - cluster.getThrottler().get() + cluster.getPollingSettings() ); - } + }; + MessageFilterStats filterStats = new MessageFilterStats(); return Flux.create(emitter) .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats)) - .filter(getMsgFilter(query, filterQueryType, filterStats)) + .filter(getMsgFilter(query, filterId, filterStats)) .map(getDataMasker(cluster, topic)) - .takeWhile(createTakeWhilePredicate(seekDirection, limit)) - .map(throttleUiPublish(seekDirection)); + .takeWhile(createTakeWhilePredicate(consumerPosition.pollingMode(), limit)) + .map(throttleUiPublish(consumerPosition.pollingMode())); } private Predicate createTakeWhilePredicate( - SeekDirectionDTO seekDirection, int limit) { - return seekDirection == SeekDirectionDTO.TAILING + PollingModeDTO pollingMode, int limit) { + return pollingMode == PollingModeDTO.TAILING ? 
evt -> true // no limit for tailing : new ResultSizeLimiter(limit); } @@ -278,21 +217,35 @@ private UnaryOperator getDataMasker(KafkaCluster cluster, }; } - private Predicate getMsgFilter(String query, - MessageFilterTypeDTO filterQueryType, + public String registerMessageFilter(String groovyCode) { + var filter = MessageFilters.groovyScriptFilter(groovyCode); + var id = RandomStringUtils.random(10, true, true); + registeredFilters.put(id, filter); + return id; + } + + private Predicate getMsgFilter(@Nullable String containsStrFilter, + @Nullable String filterId, MessageFilterStats filterStats) { - if (StringUtils.isEmpty(query)) { - return evt -> true; + Predicate messageFilter = e -> true; + if (containsStrFilter != null) { + messageFilter = MessageFilters.containsStringFilter(containsStrFilter); + } + if (filterId != null) { + messageFilter = registeredFilters.get(filterId); + if (messageFilter == null) { + throw new ValidationException("No filter was registered with id " + filterId); + } } - var messageFilter = MessageFilters.createMsgFilter(query, filterQueryType); + Predicate finalMessageFilter = messageFilter; return evt -> { // we only apply filter for message events if (evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) { try { - return messageFilter.test(evt.getMessage()); + return finalMessageFilter.test(evt.getMessage()); } catch (Exception e) { filterStats.incrementApplyErrors(); - log.trace("Error applying filter '{}' for message {}", query, evt.getMessage()); + log.trace("Error applying filter for message {}", evt.getMessage()); return false; } } @@ -300,8 +253,8 @@ private Predicate getMsgFilter(String query, }; } - private UnaryOperator throttleUiPublish(SeekDirectionDTO seekDirection) { - if (seekDirection == SeekDirectionDTO.TAILING) { + private UnaryOperator throttleUiPublish(PollingModeDTO pollingMode) { + if (pollingMode == PollingModeDTO.TAILING) { RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE); return m -> { rateLimiter.acquire(1); diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java index ff11aa6656a..b925ea607f2 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java @@ -56,7 +56,7 @@ public void shouldDeleteRecords() { } long count = webTestClient.get() - .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName) + .uri("/api/clusters/{clusterName}/topics/{topicName}/messages/v2?m=EARLIEST", LOCAL, topicName) .accept(TEXT_EVENT_STREAM) .exchange() .expectStatus() diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java index affa423123c..79bda501740 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java @@ -2,7 +2,9 @@ import static org.assertj.core.api.Assertions.assertThat; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.PollingModeDTO; +import java.util.List; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -14,6 +16,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Nested; 
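 // CsvSource values below are converted to PollingModeDTO constants by JUnit's
 // implicit String-to-enum argument conversion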
import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; class SeekOperationsTest { @@ -45,8 +49,7 @@ void latest() { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - SeekTypeDTO.LATEST, - null + new ConsumerPosition(PollingModeDTO.LATEST, topic, null, null, null) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 30L)); } @@ -56,33 +59,38 @@ void beginning() { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - SeekTypeDTO.BEGINNING, - null + new ConsumerPosition(PollingModeDTO.EARLIEST, topic, null, null, null) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L)); } - @Test - void offsets() { + @ParameterizedTest + @CsvSource({"TO_OFFSET", "FROM_OFFSET"}) + void offsets(PollingModeDTO mode) { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - SeekTypeDTO.OFFSET, - Map.of(tp1, 10L, tp2, 10L, tp3, 26L) + new ConsumerPosition( + mode, topic, List.of(tp1, tp2, tp3), null, + new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 10L, tp3, 26L)) + ) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 10L, tp3, 26L)); } - @Test - void offsetsWithBoundsFixing() { + @ParameterizedTest + @CsvSource({"TO_OFFSET", "FROM_OFFSET"}) + void offsetsWithBoundsFixing(PollingModeDTO mode) { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - SeekTypeDTO.OFFSET, - Map.of(tp1, 10L, tp2, 21L, tp3, 24L) + new ConsumerPosition( + mode, topic, List.of(tp1, tp2, tp3), null, + new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 21L, tp3, 24L)) + ) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 25L)); } } -} \ No newline at end of file +} diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java index 2798bd213fe..6585ba840ea 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java @@ -4,10 +4,9 @@ import com.provectus.kafka.ui.AbstractIntegrationTest; import com.provectus.kafka.ui.model.ConsumerPosition; -import com.provectus.kafka.ui.model.MessageFilterTypeDTO; -import com.provectus.kafka.ui.model.SeekDirectionDTO; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.serdes.builtin.StringSerde; import com.provectus.kafka.ui.service.ClustersStorage; import com.provectus.kafka.ui.service.MessagesService; import java.time.Duration; @@ -110,14 +109,13 @@ private Flux createTailingFlux( .get(); return applicationContext.getBean(MessagesService.class) - .loadMessages(cluster, topicName, - new ConsumerPosition(SeekTypeDTO.LATEST, topic, null), + .loadMessagesV2(cluster, topicName, + new ConsumerPosition(PollingModeDTO.TAILING, topic, List.of(), null, null), query, - MessageFilterTypeDTO.STRING_CONTAINS, + null, 0, - SeekDirectionDTO.TAILING, - "String", - "String"); + StringSerde.name(), + StringSerde.name()); } private List startTailing(String filterQuery) { diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java 
b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java index 75a69adec71..fa2ad19aace 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java @@ -5,8 +5,7 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.SeekDirectionDTO; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; @@ -55,7 +54,9 @@ void sendMessageReturnsExceptionWhenTopicNotFound() { @Test void loadMessagesReturnsExceptionWhenTopicNotFound() { StepVerifier.create(messagesService - .loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, null, "String", "String")) + .loadMessagesV2(cluster, NON_EXISTING_TOPIC, + new ConsumerPosition(PollingModeDTO.TAILING, NON_EXISTING_TOPIC, List.of(), null, null), + null, null, 1, "String", "String")) .expectError(TopicNotFoundException.class) .verify(); } @@ -68,14 +69,13 @@ void maskingAppliedOnConfiguredClusters() throws Exception { producer.send(testTopic, "message1"); producer.send(testTopic, "message2").get(); - Flux msgsFlux = messagesService.loadMessages( + Flux msgsFlux = messagesService.loadMessagesV2( cluster, testTopic, - new ConsumerPosition(SeekTypeDTO.BEGINNING, testTopic, null), + new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null), null, null, 100, - SeekDirectionDTO.FORWARD, StringSerde.name(), StringSerde.name() ).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java index e7b9edf834d..1474a034a84 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java @@ -1,9 +1,11 @@ package com.provectus.kafka.ui.service; -import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING; -import static com.provectus.kafka.ui.model.SeekTypeDTO.LATEST; -import static com.provectus.kafka.ui.model.SeekTypeDTO.OFFSET; -import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP; +import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST; +import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_OFFSET; +import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_TIMESTAMP; +import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST; +import static com.provectus.kafka.ui.model.PollingModeDTO.TO_OFFSET; +import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP; import static org.assertj.core.api.Assertions.assertThat; import com.provectus.kafka.ui.AbstractIntegrationTest; @@ -11,6 +13,7 @@ import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; import com.provectus.kafka.ui.emitter.PollingSettings; import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.ConsumerPosition.Offsets; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; import com.provectus.kafka.ui.serde.api.Serde; @@ -60,10 
+63,11 @@ class RecordEmitterTest extends AbstractIntegrationTest { static void generateMsgs() throws Exception { createTopic(new NewTopic(TOPIC, PARTITIONS, (short) 1)); createTopic(new NewTopic(EMPTY_TOPIC, PARTITIONS, (short) 1)); + long startTs = System.currentTimeMillis(); try (var producer = KafkaTestProducer.forKafka(kafka)) { for (int partition = 0; partition < PARTITIONS; partition++) { for (int i = 0; i < MSGS_PER_PARTITION; i++) { - long ts = System.currentTimeMillis() + i; + long ts = (startTs += 100); var value = "msg_" + partition + "_" + i; var metadata = producer.send( new ProducerRecord<>( @@ -110,14 +114,14 @@ private static ConsumerRecordDeserializer createRecordsDeserializer() { void pollNothingOnEmptyTopic() { var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, - new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null), + new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), RECORD_DESERIALIZER, PollingSettings.createDefault() ); var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null), + new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), 100, RECORD_DESERIALIZER, PollingSettings.createDefault() @@ -140,14 +144,14 @@ void pollNothingOnEmptyTopic() { void pollFullTopicFromBeginning() { var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, - new ConsumerPosition(BEGINNING, TOPIC, null), + new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null), RECORD_DESERIALIZER, PollingSettings.createDefault() ); var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(LATEST, TOPIC, null), + new ConsumerPosition(LATEST, TOPIC, List.of(), null, null), PARTITIONS * MSGS_PER_PARTITION, RECORD_DESERIALIZER, PollingSettings.createDefault() @@ -169,14 +173,16 @@ void pollWithOffsets() { var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, - new ConsumerPosition(OFFSET, TOPIC, targetOffsets), + new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, + new Offsets(null, targetOffsets)), RECORD_DESERIALIZER, PollingSettings.createDefault() ); var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(OFFSET, TOPIC, targetOffsets), + new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, + new Offsets(null, targetOffsets)), PARTITIONS * MSGS_PER_PARTITION, RECORD_DESERIALIZER, PollingSettings.createDefault() @@ -199,47 +205,40 @@ void pollWithOffsets() { @Test void pollWithTimestamps() { - Map targetTimestamps = new HashMap<>(); - final Map> perPartition = - SENT_RECORDS.stream().collect(Collectors.groupingBy((r) -> r.tp)); - for (int i = 0; i < PARTITIONS; i++) { - final List records = perPartition.get(new TopicPartition(TOPIC, i)); - int randRecordIdx = ThreadLocalRandom.current().nextInt(records.size()); - log.info("partition: {} position: {}", i, randRecordIdx); - targetTimestamps.put( - new TopicPartition(TOPIC, i), - records.get(randRecordIdx).getTimestamp() - ); - } + var tsStats = SENT_RECORDS.stream().mapToLong(Record::getTimestamp).summaryStatistics(); + //choosing ts in the middle + long targetTimestamp = tsStats.getMin() + ((tsStats.getMax() - tsStats.getMin()) / 2); var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, - new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps), + new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), RECORD_DESERIALIZER, 
PollingSettings.createDefault() ); + expectEmitter( + forwardEmitter, + SENT_RECORDS.stream() + .filter(r -> r.getTimestamp() >= targetTimestamp) + .map(Record::getValue) + .collect(Collectors.toList()) + ); + var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps), + new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), PARTITIONS * MSGS_PER_PARTITION, RECORD_DESERIALIZER, PollingSettings.createDefault() ); - var expectedValues = SENT_RECORDS.stream() - .filter(r -> r.getTimestamp() >= targetTimestamps.get(r.getTp())) - .map(Record::getValue) - .collect(Collectors.toList()); - - expectEmitter(forwardEmitter, expectedValues); - - expectedValues = SENT_RECORDS.stream() - .filter(r -> r.getTimestamp() < targetTimestamps.get(r.getTp())) - .map(Record::getValue) - .collect(Collectors.toList()); - - expectEmitter(backwardEmitter, expectedValues); + expectEmitter( + backwardEmitter, + SENT_RECORDS.stream() + .filter(r -> r.getTimestamp() < targetTimestamp) + .map(Record::getValue) + .collect(Collectors.toList()) + ); } @Test @@ -252,7 +251,8 @@ void backwardEmitterSeekToEnd() { var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(OFFSET, TOPIC, targetOffsets), + new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, + new Offsets(null, targetOffsets)), numMessages, RECORD_DESERIALIZER, PollingSettings.createDefault() @@ -278,7 +278,7 @@ void backwardEmitterSeekToBegin() { var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(OFFSET, TOPIC, offsets), + new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null, new Offsets(null, offsets)), 100, RECORD_DESERIALIZER, PollingSettings.createDefault() diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java index 78c111cdd19..c8a8adef2f7 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java @@ -7,8 +7,7 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.SeekDirectionDTO; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serdes.builtin.Int32Serde; @@ -20,6 +19,7 @@ import io.confluent.kafka.schemaregistry.json.JsonSchema; import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema; import java.time.Duration; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.UUID; @@ -497,18 +497,13 @@ public void doAssert(Consumer msgAssert) { String topic = createTopicAndCreateSchemas(); try { messagesService.sendMessage(targetCluster, topic, msgToSend).block(); - TopicMessageDTO polled = messagesService.loadMessages( + TopicMessageDTO polled = messagesService.loadMessagesV2( targetCluster, topic, - new ConsumerPosition( - SeekTypeDTO.BEGINNING, - topic, - Map.of(new TopicPartition(topic, 0), 0L) - ), + new ConsumerPosition(PollingModeDTO.EARLIEST, topic, List.of(), null, null), null, null, 1, - SeekDirectionDTO.FORWARD, 
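              // limit = 1: the round-trip test sends one message and reads it back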
msgToSend.getKeySerde().get(), msgToSend.getValueSerde().get() ).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index e7108cc7d50..f4508d0f2b9 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -772,9 +772,7 @@ paths: content: application/json: schema: - type: array - items: - $ref: '#/components/schemas/MessageFilterId' + $ref: '#/components/schemas/MessageFilterId' /api/clusters/{clusterName}/topics/{topicName}/messages/v2: @@ -804,7 +802,7 @@ paths: in: query schema: type: array - description: List of target partitions( all partitions if not provided) + description: List of target partitions (all partitions if not provided) items: type: integer - name: lim @@ -824,7 +822,7 @@ paths: type: string - name: offs in: query - description: partition offsets to read from / to. Format is "p1:off1,p2:off2,..." + description: partition offsets to read from / to. Format is "p1:offset1,p2:offset2,...". schema: type: string - name: ts @@ -2571,7 +2569,6 @@ components: - CONSUMING - DONE - CURSOR - - EMIT_THROTTLING message: $ref: "#/components/schemas/TopicMessage" phase: @@ -2708,7 +2705,7 @@ components: - FROM_TIMESTAMP - TO_TIMESTAMP - LATEST - - FIRST + - EARLIEST - TAILING MessageFilterType: From 35c63b8e850052eb8275605e2865d16c96141943 Mon Sep 17 00:00:00 2001 From: iliax Date: Wed, 22 Mar 2023 23:21:37 +0400 Subject: [PATCH 04/16] wip --- documentation/compose/kafka-ui.yaml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/documentation/compose/kafka-ui.yaml b/documentation/compose/kafka-ui.yaml index d4f08e9564c..8524f6fa2ba 100644 --- a/documentation/compose/kafka-ui.yaml +++ b/documentation/compose/kafka-ui.yaml @@ -20,6 +20,11 @@ services: KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085 KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083 + KAFKA_CLUSTERS_1_NAME: secondLocal + KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092 + KAFKA_CLUSTERS_1_METRICS_PORT: 9998 + KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085 + DYNAMIC_CONFIG_ENABLED: 'true' kafka0: image: confluentinc/cp-kafka:7.2.1 @@ -40,7 +45,7 @@ services: KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997 KAFKA_PROCESS_ROLES: 'broker,controller' KAFKA_NODE_ID: 1 - KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093,2@kafka1:29093' + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093' KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092' KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' @@ -57,7 +62,7 @@ services: - "9093:9092" - "9998:9998" environment: - KAFKA_BROKER_ID: 2 + KAFKA_BROKER_ID: 1 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT' KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092' KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 @@ -67,8 +72,8 @@ services: KAFKA_JMX_PORT: 9998 KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false 
-Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9998
       KAFKA_PROCESS_ROLES: 'broker,controller'
-      KAFKA_NODE_ID: 2
-      KAFKA_CONTROLLER_QUORUM_VOTERS: '2@kafka1:29093,1@kafka0:29093'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
       KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
       KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
       KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'

From 7f2f1611bd6f849dadaecba1d7f6b30f40764a0a Mon Sep 17 00:00:00 2001
From: iliax
Date: Wed, 22 Mar 2023 23:29:48 +0400
Subject: [PATCH 05/16] wip

---
 .../kafka/ui/model/ConsumerPosition.java      | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
index 6bc71edb5f2..666278d26fe 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
@@ -20,7 +20,8 @@ public record ConsumerPosition(PollingModeDTO pollingMode,
                                @Nullable Long timestamp,
                                @Nullable Offsets offsets) {

-  public record Offsets(@Nullable Long offset,
+  // one of the properties below will be null
+  public record Offsets(@Nullable Long offset, // should be applied to all partitions
                         @Nullable Map<TopicPartition, Long> tpOffsets) {
   }

@@ -29,20 +30,22 @@ public static ConsumerPosition create(PollingModeDTO pollingMode,
                                         @Nullable List<Integer> partitions,
                                         @Nullable Long timestamp,
                                         @Nullable String offsetsStr) {
-    Offsets offsets = parseAndValidateOffsets(pollingMode, topic, offsetsStr);
+    @Nullable var offsets = parseAndValidateOffsets(pollingMode, topic, offsetsStr);

     var topicPartitions = Optional.ofNullable(partitions).orElse(List.of())
         .stream()
         .map(p -> new TopicPartition(topic, p))
         .collect(Collectors.toList());

-    // if offsets are specified -inferring partitions list from there
-    topicPartitions = offsets.tpOffsets == null ? topicPartitions : List.copyOf(offsets.tpOffsets.keySet());
+    // if offsets are specified - inferring partitions list from there
+    topicPartitions = (offsets != null && offsets.tpOffsets() != null)
+        ? List.copyOf(offsets.tpOffsets().keySet())
+        : topicPartitions;

     return new ConsumerPosition(
         pollingMode,
         topic,
-        Optional.ofNullable(topicPartitions).orElse(List.of()),
+        topicPartitions,
         validateTimestamp(pollingMode, timestamp),
         offsets
     );
@@ -65,7 +68,7 @@ private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode,
       if (!StringUtils.hasText(offsetsStr)) {
         throw new ValidationException("offsets not provided for " + pollingMode);
       }
-      if (offsetsStr.contains(":")) {
+      if (!offsetsStr.contains(":")) {
        offsets = new Offsets(Long.parseLong(offsetsStr), null);
       } else {
         Map<TopicPartition, Long> tpOffsets = Stream.of(offsetsStr.split(","))
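Patch 05's one-character fix above is easy to misread: a bare number ("123") is a single offset meant for all target partitions, while the colon form ("0:100,1:200") pins per-partition offsets, and the old condition dispatched the two forms the wrong way round. A small sketch of the intended dispatch, using a hypothetical helper with plain integer partition keys (the real code builds Offsets with TopicPartition keys):

    import java.util.Map;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    class OffsetsFormatSketch {

      // hypothetical helper mirroring parseAndValidateOffsets' branch structure
      static Map<Integer, Long> parse(String offsetsStr) {
        if (!offsetsStr.contains(":")) { // this negation is exactly what the patch adds
          return Map.of(-1, Long.parseLong(offsetsStr)); // -1 standing in for "all partitions"
        }
        return Stream.of(offsetsStr.split(","))
            .map(p -> p.split(":"))
            .collect(Collectors.toMap(a -> Integer.parseInt(a[0]), a -> Long.parseLong(a[1])));
      }

      public static void main(String[] args) {
        System.out.println(parse("123"));         // {-1=123}: one offset for every partition
        System.out.println(parse("0:100,1:200")); // per-partition offsets (iteration order may vary)
      }
    }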
From 23157b72341ec918724a1fadc39d6ba9be13f058 Mon Sep 17 00:00:00 2001
From: iliax
Date: Wed, 22 Mar 2023 23:52:50 +0400
Subject: [PATCH 06/16] wip

---
 .../ui/emitter/ForwardRecordEmitter.java      |  2 +-
 .../kafka/ui/emitter/OffsetsInfo.java         |  5 ++++
 .../kafka/ui/emitter/SeekOperations.java      | 26 +++++++------------
 .../kafka/ui/emitter/TailingEmitter.java      | 12 ++-------
 .../kafka/ui/emitter/SeekOperationsTest.java  | 17 ++++++++++--
 5 files changed, 32 insertions(+), 30 deletions(-)

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
index d60d99d76cf..94b6ce236be 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
@@ -35,7 +35,7 @@ public void accept(FluxSink<TopicMessageEventDTO> sink) {
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
       sendPhase(sink, "Assigning partitions");
       var seekOperations = SeekOperations.create(consumer, position);
-      seekOperations.assignAndSeekNonEmptyPartitions();
+      seekOperations.assignAndSeek();

       EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
       while (!sink.isCancelled()
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java
index 1b1381ea707..b8c31c0af56 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java
@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.emitter;

 import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Map;
@@ -56,4 +57,8 @@ public boolean assignedPartitionsFullyPolled() {
     return true;
   }

+  public Set<TopicPartition> allTargetPartitions() {
+    return Sets.union(nonEmptyPartitions, emptyPartitions);
+  }
+
 }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java
index 3d1d02345a7..44f727b20e9 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java
@@ -21,20 +21,14 @@ class SeekOperations {
   private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!

   static SeekOperations create(Consumer<Bytes, Bytes> consumer, ConsumerPosition consumerPosition) {
-    OffsetsInfo offsetsInfo;
-    if (consumerPosition.partitions().isEmpty()) {
-      offsetsInfo = new OffsetsInfo(consumer, consumerPosition.topic());
-    } else {
-      offsetsInfo = new OffsetsInfo(consumer, consumerPosition.partitions());
-    }
-    return new SeekOperations(
-        consumer,
-        offsetsInfo,
-        getOffsetsForSeek(consumer, offsetsInfo, consumerPosition)
-    );
+    OffsetsInfo offsetsInfo = consumerPosition.partitions().isEmpty()
+        ? new OffsetsInfo(consumer, consumerPosition.topic())
+        : new OffsetsInfo(consumer, consumerPosition.partitions());
+    var offsetsToSeek = getOffsetsForSeek(consumer, offsetsInfo, consumerPosition);
+    return new SeekOperations(consumer, offsetsInfo, offsetsToSeek);
   }

-  void assignAndSeekNonEmptyPartitions() {
+  void assignAndSeek() {
     consumer.assign(offsetsForSeek.keySet());
     offsetsForSeek.forEach(consumer::seek);
   }
@@ -43,10 +37,6 @@ Map<TopicPartition, Long> getBeginOffsets() {
     return offsetsInfo.getBeginOffsets();
   }

-  Map<TopicPartition, Long> getEndOffsets() {
-    return offsetsInfo.getEndOffsets();
-  }
-
   boolean assignedPartitionsFullyPolled() {
     return offsetsInfo.assignedPartitionsFullyPolled();
   }
@@ -64,7 +54,9 @@ static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<Bytes, Bytes> consumer,
                                                      OffsetsInfo offsetsInfo,
                                                      ConsumerPosition position) {
     switch (position.pollingMode()) {
-      case LATEST, TAILING:
+      case TAILING:
+        return consumer.endOffsets(offsetsInfo.allTargetPartitions());
+      case LATEST:
         return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
       case EARLIEST:
         return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java
index 458f2d77a64..dee522b01ea 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java
@@ -3,7 +3,6 @@
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import java.util.HashMap;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -30,7 +29,8 @@ public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
     log.debug("Starting tailing polling for {}", consumerPosition);
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
-      assignAndSeek(consumer);
+      SeekOperations.create(consumer, consumerPosition)
+          .assignAndSeek();
       while (!sink.isCancelled()) {
         sendPhase(sink, "Polling");
         var polled = poll(sink, consumer);
@@ -47,12 +47,4 @@ public void accept(FluxSink<TopicMessageEventDTO> sink) {
     }
   }

-  private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
-    var seekOperations = SeekOperations.create(consumer, consumerPosition);
-    var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end
-    seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions
-    consumer.assign(seekOffsets.keySet());
-    seekOffsets.forEach(consumer::seek);
-  }
-
 }
diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java
index 79bda501740..e288e77a113 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java
@@ -1,5 +1,8 @@
 package com.provectus.kafka.ui.emitter;

+import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
+import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST;
+import static com.provectus.kafka.ui.model.PollingModeDTO.TAILING;
 import static org.assertj.core.api.Assertions.assertThat;

 import com.provectus.kafka.ui.model.ConsumerPosition;
@@ -44,12 +47,22 @@ void initMockConsumer() {
   @Nested
   class GetOffsetsForSeek {

+    @Test
+    void tailing() {
+      var offsets = SeekOperations.getOffsetsForSeek(
+          consumer,
+          new OffsetsInfo(consumer, topic),
+          new ConsumerPosition(TAILING, topic, List.of(), null, null)
+      );
+      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L));
+    }
+
     @Test
     void latest() {
       var offsets = SeekOperations.getOffsetsForSeek(
           consumer,
           new OffsetsInfo(consumer, topic),
-          new ConsumerPosition(PollingModeDTO.LATEST, topic, null, null, null)
+          new ConsumerPosition(LATEST, topic, List.of(), null, null)
       );
       assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 30L));
     }
@@ -59,7 +72,7 @@ void beginning() {
       var offsets = SeekOperations.getOffsetsForSeek(
           consumer,
           new OffsetsInfo(consumer, topic),
-          new ConsumerPosition(PollingModeDTO.EARLIEST, topic, null, null, null)
+          new ConsumerPosition(EARLIEST, topic, List.of(), null, null)
       );
       assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L));
     }

From f5d282e52976d45d125ef4f835963497dc111757 Mon Sep 17 00:00:00 2001
From: iliax
Date: Thu, 23 Mar 2023 00:01:07 +0400
Subject: [PATCH 07/16] wip

---
 .../java/com/provectus/kafka/ui/emitter/MessageFilters.java  | 4 ++++
 .../java/com/provectus/kafka/ui/service/MessagesService.java | 3 +--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java
index 28a1b20b178..f109289fe49 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java
@@ -21,6 +21,10 @@ public class MessageFilters {
   private MessageFilters() {
   }

+  public static Predicate<TopicMessageDTO> noop() {
+    return e -> true;
+  }
+
   public static Predicate<TopicMessageDTO> containsStringFilter(String string) {
     return msg -> StringUtils.contains(msg.getKey(), string)
         || StringUtils.contains(msg.getContent(), string);
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
index 722f0997217..11845bd0c05 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
@@ -22,7 +22,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Random;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Predicate;
@@ -227,7 +226,7 @@ public String registerMessageFilter(String groovyCode) {
   private Predicate<TopicMessageEventDTO> getMsgFilter(@Nullable String containsStrFilter,
                                                        @Nullable String filterId,
                                                        MessageFilterStats filterStats) {
-    Predicate<TopicMessageDTO> messageFilter = e -> true;
+    Predicate<TopicMessageDTO> messageFilter = MessageFilters.noop();
     if (containsStrFilter != null) {
       messageFilter = MessageFilters.containsStringFilter(containsStrFilter);
     }
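One note before the next patch: the noop() helper added in patch 07 is the identity element the filter chain starts from; at this point the service still reassigns the predicate outright, and patch 09 below switches to composing with Predicate.and, which is the shape sketched here. A minimal, self-contained model (Msg is a hypothetical stand-in for TopicMessageDTO; StringUtils is the commons-lang3 one used by MessageFilters):

    import java.util.function.Predicate;
    import org.apache.commons.lang3.StringUtils;

    class FilterCompositionSketch {

      // hypothetical stand-in for TopicMessageDTO
      record Msg(String key, String content) {}

      // same idea as MessageFilters.noop(): an always-true identity predicate
      static Predicate<Msg> noop() {
        return m -> true;
      }

      // same idea as MessageFilters.containsStringFilter(..); StringUtils.contains is null-safe
      static Predicate<Msg> containsString(String s) {
        return m -> StringUtils.contains(m.key(), s) || StringUtils.contains(m.content(), s);
      }

      public static void main(String[] args) {
        // starting from noop() lets optional filters be chained without null checks
        Predicate<Msg> filter = noop().and(containsString("err"));
        System.out.println(filter.test(new Msg("k", "error: boom"))); // true
        System.out.println(filter.test(new Msg("k", "all good")));    // false
      }
    }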
From d6244b9b91e9dd0609d3b426678ed15db9fad56d Mon Sep 17 00:00:00 2001
From: iliax
Date: Thu, 23 Mar 2023 14:43:19 +0400
Subject: [PATCH 08/16] wip

---
 kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index f4508d0f2b9..1e27b373ee2 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -2568,7 +2568,6 @@ components:
           - MESSAGE
           - CONSUMING
           - DONE
-          - CURSOR
         message:
           $ref: "#/components/schemas/TopicMessage"
         phase:

From 8f217221e49b8dad57f3499ce0a3fca0b2f7ab0d Mon Sep 17 00:00:00 2001
From: iliax
Date: Mon, 24 Apr 2023 20:00:45 +0400
Subject: [PATCH 09/16] api updates

---
 .../ui/controller/MessagesController.java     |  86 +++----
 .../kafka/ui/emitter/AbstractEmitter.java     |   8 +-
 .../ui/emitter/BackwardRecordEmitter.java     |  18 +-
 .../kafka/ui/emitter/ConsumingStats.java      |  15 +-
 .../provectus/kafka/ui/emitter/Cursor.java    |  69 ++++++
 .../ui/emitter/ForwardRecordEmitter.java      |  20 +-
 .../kafka/ui/emitter/MessagesProcessing.java  |   4 +-
 .../kafka/ui/emitter/SeekOperations.java      |  28 +--
 .../kafka/ui/model/ConsumerPosition.java      |  37 +--
 .../ui/serdes/ConsumerRecordDeserializer.java |  15 +-
 .../kafka/ui/service/MessagesService.java     | 211 ++++++++----------
 .../ui/service/PollingCursorsStorage.java     |  25 +++
 .../kafka/ui/emitter/TailingEmitterTest.java  |   2 +-
 .../kafka/ui/service/MessagesServiceTest.java |   4 +-
 .../kafka/ui/service/RecordEmitterTest.java   |  35 ++-
 .../kafka/ui/service/SendAndReadTests.java    |   2 +-
 .../main/resources/swagger/kafka-ui-api.yaml  |  32 +--
 17 files changed, 335 insertions(+), 276 deletions(-)
 create mode 100644 kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java
 create mode 100644 kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
index d526f7246df..fed0544fff6 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
@@ -25,8 +25,8 @@
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Optional;
-import javax.annotation.Nullable;
 import javax.validation.Valid;
+import javax.validation.ValidationException;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
@@ -78,30 +78,45 @@ public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessages(String
                                                                          String keySerde,
                                                                          String valueSerde,
                                                                          ServerWebExchange exchange) {
+    throw new ValidationException("Not supported");
+  }
+
+
+  @Override
+  public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessagesV2(String clusterName, String topicName,
+                                                                             PollingModeDTO mode,
+                                                                             List<Integer> partitions,
+                                                                             Integer limit,
+                                                                             String stringFilter,
+                                                                             String smartFilterId,
+                                                                             Long offset,
+                                                                             Long timestamp,
+                                                                             String keySerde,
+                                                                             String valueSerde,
+                                                                             String cursor,
+                                                                             ServerWebExchange exchange) {
     final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
         .build());

-    seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
-    seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
-    filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
-
-    var positions = new ConsumerPosition(
-        seekType,
-        topicName,
-        parseSeekTo(topicName, seekType, seekTo)
-    );
-    Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> job = Mono.just(
-        ResponseEntity.ok(
-            messagesService.loadMessages(
-                getCluster(clusterName), topicName, positions, q, filterQueryType,
-                limit, seekDirection, keySerde, valueSerde)
-        )
-    );
-
-    return validateAccess.then(job);
+    Flux<TopicMessageEventDTO> messagesFlux;
+    if (cursor != null) {
+      messagesFlux = messagesService.loadMessages(getCluster(clusterName), topicName, cursor);
+    } else {
+      messagesFlux = messagesService.loadMessages(
+          getCluster(clusterName),
+          topicName,
+          ConsumerPosition.create(mode, topicName, partitions, timestamp, offset),
+          stringFilter,
+          smartFilterId,
+          limit,
+          keySerde,
+          valueSerde
+      );
+    }
+    return validateAccess.then(Mono.just(ResponseEntity.ok(messagesFlux)));
   }

   @Override
@@ -149,39 +164,6 @@ public Mono<ResponseEntity<TopicSerdeSuggestionDTO>> getSerdes(String clusterNam
     );
   }

-
-  @Override
-  public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessagesV2(String clusterName, String topicName,
-                                                                             PollingModeDTO mode,
-                                                                             @Nullable List<Integer> partitions,
-                                                                             @Nullable Integer limit,
-                                                                             @Nullable String query,
-                                                                             @Nullable String filterId,
-                                                                             @Nullable String offsetString,
-                                                                             @Nullable Long ts,
-                                                                             @Nullable String keySerde,
-                                                                             @Nullable String valueSerde,
-                                                                             ServerWebExchange exchange) {
-    final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
-        .cluster(clusterName)
-        .topic(topicName)
-        .topicActions(MESSAGES_READ)
-        .build());
-
-    ConsumerPosition consumerPosition = ConsumerPosition.create(mode, topicName, partitions, ts, offsetString);
-
-    int recordsLimit =
-        Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT);
-
-    return validateAccess.then(
-        Mono.just(
-            ResponseEntity.ok(
-                messagesService.loadMessagesV2(
-                    getCluster(clusterName), topicName, consumerPosition,
-                    query, filterId, recordsLimit, keySerde, valueSerde))));
-  }
-
   @Override
   public Mono<ResponseEntity<MessageFilterIdDTO>> registerFilter(String clusterName,
                                                                  String topicName,
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java
index 11ab8988417..1b9a0efa064 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java
@@ -3,6 +3,7 @@
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import java.time.Duration;
 import java.time.Instant;
+import javax.annotation.Nullable;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
@@ -36,7 +37,7 @@ protected ConsumerRecords<Bytes, Bytes> poll(
     return records;
   }

-  protected boolean sendLimitReached() {
+  protected boolean isSendLimitReached() {
     return messagesProcessing.limitReached();
   }

@@ -55,8 +56,9 @@ protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
     return messagesProcessing.sentConsumingInfo(sink, records, elapsed);
   }

-  protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
-    messagesProcessing.sendFinishEvent(sink);
+  // cursor is null if the target partitions were fully polled (no need to do paging)
+  protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
+    messagesProcessing.sendFinishEvents(sink, cursor);
     sink.complete();
   }
 }
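The emitter hunks below thread a Cursor.Tracking through each poll loop, and the id that ends up in the DONE event comes from a bounded in-memory registry (PollingCursorsStorage, added later in this patch). A condensed model of that register/lookup round-trip, reusing the same Guava and commons-lang3 calls as the diff:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import java.util.Optional;
    import org.apache.commons.lang3.RandomStringUtils;

    // simplified model of PollingCursorsStorage; the cursor payload type is left generic
    class CursorRegistrySketch<C> {

      private final Cache<String, C> cache = CacheBuilder.newBuilder()
          .maximumSize(10_000) // bounded: stale cursors get evicted, so lookups may legitimately miss
          .build();

      String register(C cursor) {
        String id = RandomStringUtils.random(8, true, true); // 8 alphanumeric chars, as in the diff
        cache.put(id, cursor);
        return id;
      }

      Optional<C> find(String id) {
        return Optional.ofNullable(cache.getIfPresent(id));
      }
    }

Because the cache is bounded, a client that waits too long between pages can get a miss; MessagesService surfaces that as a ValidationException instead of silently restarting the poll.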
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
index 78b3c729763..ceb77d3b54e 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
@@ -23,17 +23,20 @@ public class BackwardRecordEmitter extends AbstractEmitter {
   private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
   private final ConsumerPosition consumerPosition;
   private final int messagesPerPage;
+  private final Cursor.Tracking cursor;

   public BackwardRecordEmitter(
       Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
       ConsumerPosition consumerPosition,
       int messagesPerPage,
       MessagesProcessing messagesProcessing,
-      PollingSettings pollingSettings) {
+      PollingSettings pollingSettings,
+      Cursor.Tracking cursor) {
     super(messagesProcessing, pollingSettings);
     this.consumerPosition = consumerPosition;
     this.messagesPerPage = messagesPerPage;
     this.consumerSupplier = consumerSupplier;
+    this.cursor = cursor;
   }

   @Override
@@ -45,11 +48,12 @@ public void accept(FluxSink<TopicMessageEventDTO> sink) {
       var seekOperations = SeekOperations.create(consumer, consumerPosition);
       var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
       readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());
+      cursor.trackOffsets(readUntilOffsets);

       int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
       log.debug("'Until' offsets for polling: {}", readUntilOffsets);

-      while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !sendLimitReached()) {
+      while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !isSendLimitReached()) {
         new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
           if (sink.isCancelled()) {
             return; //fast return in case of sink cancellation
@@ -74,7 +78,12 @@ public void accept(FluxSink<TopicMessageEventDTO> sink) {
           log.debug("sink is cancelled after partitions poll iteration");
         }
       }
-      sendFinishStatsAndCompleteSink(sink);
+      sendFinishStatsAndCompleteSink(
+          sink,
+          readUntilOffsets.isEmpty()
+              ? null
+              : cursor
+      );
       log.debug("Polling finished");
     } catch (InterruptException kafkaInterruptException) {
       log.debug("Polling finished due to thread interruption");
@@ -94,6 +103,7 @@ private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
   ) {
     consumer.assign(Collections.singleton(tp));
     consumer.seek(tp, fromOffset);
+    cursor.trackOffset(tp, fromOffset);
     sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset));
     int desiredMsgsToPoll = (int) (toOffset - fromOffset);

@@ -101,7 +111,7 @@ private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
     EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
     while (!sink.isCancelled()
-        && !sendLimitReached()
+        && !isSendLimitReached()
         && recordsToSend.size() < desiredMsgsToPoll
         && !emptyPolls.noDataEmptyPollsReached()) {
       var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java
index 0e002f36a44..b4ed63dafa4 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java
@@ -2,7 +2,9 @@

 import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.model.TopicMessageNextPageCursorDTO;
 import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
+import javax.annotation.Nullable;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
@@ -17,9 +19,9 @@ class ConsumingStats {
    * returns bytes polled.
    */
   int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
-                        ConsumerRecords<Bytes, Bytes> polledRecords,
-                        long elapsed,
-                        int filterApplyErrors) {
+                       ConsumerRecords<Bytes, Bytes> polledRecords,
+                       long elapsed,
+                       int filterApplyErrors) {
     int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
     bytes += polledBytes;
     this.records += polledRecords.count();
@@ -32,10 +34,15 @@ int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
     return polledBytes;
   }

-  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {
+  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors, @Nullable Cursor.Tracking cursor) {
     sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.DONE)
+            .cursor(
+                cursor != null
+                    ? new TopicMessageNextPageCursorDTO().id(cursor.registerCursor())
+                    : null
+            )
             .consuming(createConsumingStats(sink, filterApplyErrors))
     );
   }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java
new file mode 100644
index 00000000000..d78a583e829
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java
@@ -0,0 +1,69 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.PollingModeDTO;
+import com.provectus.kafka.ui.model.TopicMessageDTO;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import org.apache.kafka.common.TopicPartition;
+
+public record Cursor(ConsumerRecordDeserializer deserializer,
+                     ConsumerPosition consumerPosition,
+                     Predicate<TopicMessageDTO> filter,
+                     int limit) {
+
+  public static class Tracking {
+    private final ConsumerRecordDeserializer deserializer;
+    private final ConsumerPosition originalPosition;
+    private final Predicate<TopicMessageDTO> filter;
+    private final int limit;
+    private final Function<Cursor, String> cursorRegistry;
+
+    private final Map<TopicPartition, Long> trackingOffsets = new HashMap<>();
+
+    public Tracking(ConsumerRecordDeserializer deserializer,
+                    ConsumerPosition originalPosition,
+                    Predicate<TopicMessageDTO> filter,
+                    int limit,
+                    Function<Cursor, String> cursorRegistry) {
+      this.deserializer = deserializer;
+      this.originalPosition = originalPosition;
+      this.filter = filter;
+      this.limit = limit;
+      this.cursorRegistry = cursorRegistry;
+    }
+
+    void trackOffset(TopicPartition tp, long offset) {
+      trackingOffsets.put(tp, offset);
+    }
+
+    void trackOffsets(Map<TopicPartition, Long> offsets) {
+      this.trackingOffsets.putAll(offsets);
+    }
+
+    String registerCursor() {
+      return cursorRegistry.apply(
+          new Cursor(
+              deserializer,
+              new ConsumerPosition(
+                  switch (originalPosition.pollingMode()) {
+                    case TO_OFFSET, TO_TIMESTAMP, LATEST -> PollingModeDTO.TO_OFFSET;
+                    case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> PollingModeDTO.FROM_OFFSET;
+                    case TAILING -> throw new IllegalStateException();
+                  },
+                  originalPosition.topic(),
+                  originalPosition.partitions(),
+                  null,
+                  new ConsumerPosition.Offsets(null, trackingOffsets)
+              ),
+              filter,
+              limit
+          )
+      );
+    }
+  }
+
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
index a0b6815e5dc..3aedb9f6d5b 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
@@ -4,6 +4,7 @@
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -12,20 +13,22 @@
 import reactor.core.publisher.FluxSink;

 @Slf4j
-public class ForwardRecordEmitter
-    extends AbstractEmitter {
+public class ForwardRecordEmitter extends AbstractEmitter {

   private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
   private final ConsumerPosition position;
+  private final Cursor.Tracking cursor;

   public ForwardRecordEmitter(
       Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
       ConsumerPosition position,
MessagesProcessing messagesProcessing, - PollingSettings pollingSettings) { + PollingSettings pollingSettings, + Cursor.Tracking cursor) { super(messagesProcessing, pollingSettings); this.position = position; this.consumerSupplier = consumerSupplier; + this.cursor = cursor; } @Override @@ -35,16 +38,18 @@ public void accept(FluxSink sink) { sendPhase(sink, "Assigning partitions"); var seekOperations = SeekOperations.create(consumer, position); seekOperations.assignAndSeek(); + cursor.trackOffsets(seekOperations.getOffsetsForSeek()); EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter(); while (!sink.isCancelled() - && !sendLimitReached() + && !isSendLimitReached() && !seekOperations.assignedPartitionsFullyPolled() && !emptyPolls.noDataEmptyPollsReached()) { sendPhase(sink, "Polling"); ConsumerRecords records = poll(sink, consumer); emptyPolls.count(records); + trackOffsetsAfterPoll(consumer); log.debug("{} records polled", records.count()); @@ -52,7 +57,7 @@ public void accept(FluxSink sink) { sendMessage(sink, msg); } } - sendFinishStatsAndCompleteSink(sink); + sendFinishStatsAndCompleteSink(sink, seekOperations.assignedPartitionsFullyPolled() ? null : cursor); log.debug("Polling finished"); } catch (InterruptException kafkaInterruptException) { log.debug("Polling finished due to thread interruption"); @@ -62,4 +67,9 @@ public void accept(FluxSink sink) { sink.error(e); } } + + private void trackOffsetsAfterPoll(Consumer consumer) { + consumer.assignment().forEach(tp -> cursor.trackOffset(tp, consumer.position(tp))); + } + } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java index b6d23bc90d3..59848fac042 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java @@ -63,9 +63,9 @@ int sentConsumingInfo(FluxSink sink, return 0; } - void sendFinishEvent(FluxSink sink) { + void sendFinishEvents(FluxSink sink, @Nullable Cursor.Tracking cursor) { if (!sink.isCancelled()) { - consumingStats.sendFinishEvent(sink, filterApplyErrors); + consumingStats.sendFinishEvent(sink, filterApplyErrors, cursor); } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java index 44f727b20e9..f10be11c2d3 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java @@ -1,9 +1,9 @@ package com.provectus.kafka.ui.emitter; import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP; +import static java.util.Objects.requireNonNull; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.PollingModeDTO; import java.util.HashMap; @@ -53,22 +53,14 @@ Map getOffsetsForSeek() { static Map getOffsetsForSeek(Consumer consumer, OffsetsInfo offsetsInfo, ConsumerPosition position) { - switch (position.pollingMode()) { - case TAILING: - return consumer.endOffsets(offsetsInfo.allTargetPartitions()); - case LATEST: - return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions()); - case EARLIEST: - return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions()); - case FROM_OFFSET, 
TO_OFFSET: - Preconditions.checkNotNull(position.offsets()); - return fixOffsets(offsetsInfo, position.offsets()); - case FROM_TIMESTAMP, TO_TIMESTAMP: - Preconditions.checkNotNull(position.timestamp()); - return offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, position.timestamp()); - default: - throw new IllegalStateException(); - } + return switch (position.pollingMode()) { + case TAILING -> consumer.endOffsets(offsetsInfo.allTargetPartitions()); + case LATEST -> consumer.endOffsets(offsetsInfo.getNonEmptyPartitions()); + case EARLIEST -> consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions()); + case FROM_OFFSET, TO_OFFSET -> fixOffsets(offsetsInfo, requireNonNull(position.offsets())); + case FROM_TIMESTAMP, TO_TIMESTAMP -> + offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, requireNonNull(position.timestamp())); + }; } private static Map fixOffsets(OffsetsInfo offsetsInfo, @@ -77,7 +69,7 @@ private static Map fixOffsets(OffsetsInfo offsetsInfo, if (positionOffset.offset() != null) { offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset())); } else { - Preconditions.checkNotNull(positionOffset.tpOffsets()); + requireNonNull(positionOffset.tpOffsets()); offsets.putAll(positionOffset.tpOffsets()); offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java index 666278d26fe..6d09b20b3a5 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java @@ -1,18 +1,12 @@ package com.provectus.kafka.ui.model; -import static java.util.stream.Collectors.toMap; - import com.provectus.kafka.ui.exception.ValidationException; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.stream.Collectors; -import java.util.stream.Stream; import javax.annotation.Nullable; -import org.apache.commons.lang3.tuple.Pair; import org.apache.kafka.common.TopicPartition; -import org.springframework.util.StringUtils; - public record ConsumerPosition(PollingModeDTO pollingMode, String topic, @@ -29,8 +23,8 @@ public static ConsumerPosition create(PollingModeDTO pollingMode, String topic, @Nullable List partitions, @Nullable Long timestamp, - @Nullable String offsetsStr) { - @Nullable var offsets = parseAndValidateOffsets(pollingMode, topic, offsetsStr); + @Nullable Long offset) { + @Nullable var offsets = parseAndValidateOffsets(pollingMode, offset); var topicPartitions = Optional.ofNullable(partitions).orElse(List.of()) .stream() @@ -61,33 +55,14 @@ private static Long validateTimestamp(PollingModeDTO pollingMode, @Nullable Long } private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode, - String topic, - @Nullable String offsetsStr) { - Offsets offsets = null; + @Nullable Long offset) { if (pollingMode == PollingModeDTO.FROM_OFFSET || pollingMode == PollingModeDTO.TO_OFFSET) { - if (!StringUtils.hasText(offsetsStr)) { + if (offset == null) { throw new ValidationException("offsets not provided for " + pollingMode); } - if (!offsetsStr.contains(":")) { - offsets = new Offsets(Long.parseLong(offsetsStr), null); - } else { - Map tpOffsets = Stream.of(offsetsStr.split(",")) - .map(p -> { - String[] split = p.split(":"); - if (split.length != 2) { - throw new IllegalArgumentException( - "Wrong seekTo argument 
format. See API docs for details"); - } - return Pair.of( - new TopicPartition(topic, Integer.parseInt(split[0])), - Long.parseLong(split[1]) - ); - }) - .collect(toMap(Pair::getKey, Pair::getValue)); - offsets = new Offsets(null, tpOffsets); - } + return new Offsets(offset, null); } - return offsets; + return null; } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java index 8c7a3024edf..9fdbb5839f1 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java @@ -51,16 +51,11 @@ public TopicMessageDTO deserialize(ConsumerRecord rec) { } private static TopicMessageDTO.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) { - switch (timestampType) { - case CREATE_TIME: - return TopicMessageDTO.TimestampTypeEnum.CREATE_TIME; - case LOG_APPEND_TIME: - return TopicMessageDTO.TimestampTypeEnum.LOG_APPEND_TIME; - case NO_TIMESTAMP_TYPE: - return TopicMessageDTO.TimestampTypeEnum.NO_TIMESTAMP_TYPE; - default: - throw new IllegalArgumentException("Unknown timestampType: " + timestampType); - } + return switch (timestampType) { + case CREATE_TIME -> TopicMessageDTO.TimestampTypeEnum.CREATE_TIME; + case LOG_APPEND_TIME -> TopicMessageDTO.TimestampTypeEnum.LOG_APPEND_TIME; + case NO_TIMESTAMP_TYPE -> TopicMessageDTO.TimestampTypeEnum.NO_TIMESTAMP_TYPE; + }; } private void fillHeaders(TopicMessageDTO message, ConsumerRecord rec) { diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 2303b609097..43ee4f231ac 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -1,8 +1,13 @@ package com.provectus.kafka.ui.service; +import com.google.common.base.Charsets; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.hash.Hashing; import com.google.common.util.concurrent.RateLimiter; import com.provectus.kafka.ui.config.ClustersProperties; import com.provectus.kafka.ui.emitter.BackwardRecordEmitter; +import com.provectus.kafka.ui.emitter.Cursor; import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; import com.provectus.kafka.ui.emitter.MessageFilters; import com.provectus.kafka.ui.emitter.MessagesProcessing; @@ -16,6 +21,7 @@ import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serde.api.Serde; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import com.provectus.kafka.ui.serdes.ProducerRecordCreator; import com.provectus.kafka.ui.util.SslPropertiesUtil; import java.util.List; @@ -23,13 +29,12 @@ import java.util.Optional; import java.util.Properties; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ThreadLocalRandom; import java.util.function.Predicate; import java.util.function.UnaryOperator; import java.util.stream.Collectors; import javax.annotation.Nullable; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.kafka.clients.admin.OffsetSpec; import 
org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.producer.KafkaProducer; @@ -47,6 +52,8 @@ @Slf4j public class MessagesService { + private static final long SALT_FOR_HASHING = ThreadLocalRandom.current().nextLong(); + private static final int DEFAULT_MAX_PAGE_SIZE = 500; private static final int DEFAULT_PAGE_SIZE = 100; // limiting UI messages rate to 20/sec in tailing mode @@ -58,6 +65,12 @@ public class MessagesService { private final int maxPageSize; private final int defaultPageSize; + private final Cache> registeredFilters = CacheBuilder.newBuilder() + .maximumSize(5_000) + .build(); + + private final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage(); + public MessagesService(AdminClientService adminClientService, DeserializationService deserializationService, ConsumerGroupService consumerGroupService, @@ -74,8 +87,6 @@ public MessagesService(AdminClientService adminClientService, .orElse(DEFAULT_PAGE_SIZE); } - private final Map> registeredFilters = new ConcurrentHashMap<>(); - private Mono withExistingTopic(KafkaCluster cluster, String topicName) { return adminClientService.get(cluster) .flatMap(client -> client.describeTopic(topicName)) @@ -156,80 +167,12 @@ private Mono sendMessageImpl(KafkaCluster cluster, } } - public Flux loadMessages(KafkaCluster cluster, String topic, - ConsumerPosition consumerPosition, - @Nullable String query, - MessageFilterTypeDTO filterQueryType, - @Nullable Integer pageSize, - SeekDirectionDTO seekDirection, - @Nullable String keySerde, - @Nullable String valueSerde) { - return withExistingTopic(cluster, topic) - .flux() - .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query, - filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde)); - } - private int fixPageSize(@Nullable Integer pageSize) { return Optional.ofNullable(pageSize) .filter(ps -> ps > 0 && ps <= maxPageSize) .orElse(defaultPageSize); } - private Flux loadMessagesImpl(KafkaCluster cluster, - String topic, - ConsumerPosition consumerPosition, - @Nullable String query, - MessageFilterTypeDTO filterQueryType, - int limit, - SeekDirectionDTO seekDirection, - @Nullable String keySerde, - @Nullable String valueSerde) { - - java.util.function.Consumer> emitter; - - var processing = new MessagesProcessing( - deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde), - getMsgFilter(query, filterQueryType), - seekDirection == SeekDirectionDTO.TAILING ? null : limit - ); - - if (seekDirection.equals(SeekDirectionDTO.FORWARD)) { - emitter = new ForwardRecordEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - processing, - cluster.getPollingSettings() - ); - } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) { - emitter = new BackwardRecordEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - limit, - processing, - cluster.getPollingSettings() - ); - } else { - emitter = new TailingEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - processing, - cluster.getPollingSettings() - ); - } - return Flux.create(emitter) - .map(getDataMasker(cluster, topic)) - .map(throttleUiPublish(seekDirection)); - } - - private Predicate createTakeWhilePredicate( - PollingModeDTO pollingMode, int limit) { - return pollingMode == PollingModeDTO.TAILING - ? 
evt -> true // no limit for tailing - : new ResultSizeLimiter(limit); - } - private UnaryOperator getDataMasker(KafkaCluster cluster, String topicName) { var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY); var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE); @@ -244,83 +187,115 @@ private UnaryOperator getDataMasker(KafkaCluster cluster, }; } - private Flux loadMessagesImplV2(KafkaCluster cluster, - String topic, - ConsumerPosition consumerPosition, - @Nullable String query, - @Nullable String filterId, - int limit, - @Nullable String keySerde, - @Nullable String valueSerde) { + public Flux loadMessages(KafkaCluster cluster, + String topic, + ConsumerPosition consumerPosition, + @Nullable String containsStringFilter, + @Nullable String filterId, + @Nullable Integer limit, + @Nullable String keySerde, + @Nullable String valueSerde) { + return loadMessages( + cluster, + topic, + deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde), + consumerPosition, + getMsgFilter(containsStringFilter, filterId), + fixPageSize(limit) + ); + } + + public Flux loadMessages(KafkaCluster cluster, String topic, String cursorId) { + Cursor cursor = cursorsStorage.getCursor(cursorId) + .orElseThrow(() -> new ValidationException("Next page cursor not found. Maybe it was evicted from cache.")); + return loadMessages( + cluster, + topic, + cursor.deserializer(), + cursor.consumerPosition(), + cursor.filter(), + cursor.limit() + ); + } + + private Flux loadMessages(KafkaCluster cluster, + String topic, + ConsumerRecordDeserializer deserializer, + ConsumerPosition consumerPosition, + Predicate filter, + int limit) { + return withExistingTopic(cluster, topic) + .flux() + .publishOn(Schedulers.boundedElastic()) + .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, fixPageSize(limit))); + } - ConsumerRecordDeserializer recordDeserializer = - deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); + private Flux loadMessagesImpl(KafkaCluster cluster, + String topic, + ConsumerRecordDeserializer deserializer, + ConsumerPosition consumerPosition, + Predicate filter, + int limit) { + var processing = new MessagesProcessing( + deserializer, + filter, + consumerPosition.pollingMode() == PollingModeDTO.TAILING ? 
null : limit
+    );

     var emitter = switch (consumerPosition.pollingMode()) {
       case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
           limit,
-          recordDeserializer,
-          cluster.getPollingSettings()
+          processing,
+          cluster.getPollingSettings(),
+          new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register)
       );
       case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
-          recordDeserializer,
-          cluster.getPollingSettings()
+          processing,
+          cluster.getPollingSettings(),
+          new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register)
       );
       case TAILING -> new TailingEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
-          recordDeserializer,
+          processing,
           cluster.getPollingSettings()
       );
     };
-
-    MessageFilterStats filterStats = new MessageFilterStats();
     return Flux.create(emitter)
-        .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats))
-        .filter(getMsgFilter(query, filterId, filterStats))
         .map(getDataMasker(cluster, topic))
-        .takeWhile(createTakeWhilePredicate(consumerPosition.pollingMode(), limit))
         .map(throttleUiPublish(consumerPosition.pollingMode()));
   }

   public String registerMessageFilter(String groovyCode) {
-    var filter = MessageFilters.groovyScriptFilter(groovyCode);
-    var id = RandomStringUtils.random(10, true, true);
-    registeredFilters.put(id, filter);
-    return id;
+    String saltedCode = groovyCode + SALT_FOR_HASHING;
+    String filterId = Hashing.sha256()
+        .hashString(saltedCode, Charsets.UTF_8)
+        .toString()
+        .substring(0, 8);
+    if (registeredFilters.getIfPresent(filterId) == null) {
+      registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode));
+    }
+    return filterId;
   }

-  private Predicate<TopicMessageEventDTO> getMsgFilter(@Nullable String containsStrFilter,
-                                                       @Nullable String filterId,
-                                                       MessageFilterStats filterStats) {
+  private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String containsStrFilter,
+                                                  @Nullable String smartFilterId) {
     Predicate<TopicMessageDTO> messageFilter = MessageFilters.noop();
     if (containsStrFilter != null) {
-      messageFilter = MessageFilters.containsStringFilter(containsStrFilter);
+      messageFilter = messageFilter.and(MessageFilters.containsStringFilter(containsStrFilter));
     }
-    if (filterId != null) {
-      messageFilter = registeredFilters.get(filterId);
-      if (messageFilter == null) {
-        throw new ValidationException("No filter was registered with id " + filterId);
+    if (smartFilterId != null) {
+      var registered = registeredFilters.getIfPresent(smartFilterId);
+      if (registered == null) {
+        throw new ValidationException("No filter was registered with id " + smartFilterId);
       }
+      messageFilter = messageFilter.and(registered);
     }
-    Predicate<TopicMessageDTO> finalMessageFilter = messageFilter;
-    return evt -> {
-      // we only apply filter for message events
-      if (evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) {
-        try {
-          return finalMessageFilter.test(evt.getMessage());
-        } catch (Exception e) {
-          filterStats.incrementApplyErrors();
-          log.trace("Error applying filter for message {}", evt.getMessage());
-          return false;
-        }
-      }
-      return true;
-    };
+    return messageFilter;
   }

   private UnaryOperator<TopicMessageEventDTO> throttleUiPublish(PollingModeDTO pollingMode) {
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java
new file mode 100644
index
00000000000..a789c2afbe2 --- /dev/null +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java @@ -0,0 +1,25 @@ +package com.provectus.kafka.ui.service; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.provectus.kafka.ui.emitter.Cursor; +import java.util.Optional; +import org.apache.commons.lang3.RandomStringUtils; + +public class PollingCursorsStorage { + + private final Cache cursorsCache = CacheBuilder.newBuilder() + .maximumSize(10_000) + .build(); + + public Optional getCursor(String id) { + return Optional.ofNullable(cursorsCache.getIfPresent(id)); + } + + public String register(Cursor cursor) { + var id = RandomStringUtils.random(8, true, true); + cursorsCache.put(id, cursor); + return id; + } + +} diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java index 6585ba840ea..972a573bab9 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java @@ -109,7 +109,7 @@ private Flux createTailingFlux( .get(); return applicationContext.getBean(MessagesService.class) - .loadMessagesV2(cluster, topicName, + .loadMessages(cluster, topicName, new ConsumerPosition(PollingModeDTO.TAILING, topic, List.of(), null, null), query, null, diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java index fa2ad19aace..fbaa0748bdb 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java @@ -54,7 +54,7 @@ void sendMessageReturnsExceptionWhenTopicNotFound() { @Test void loadMessagesReturnsExceptionWhenTopicNotFound() { StepVerifier.create(messagesService - .loadMessagesV2(cluster, NON_EXISTING_TOPIC, + .loadMessages(cluster, NON_EXISTING_TOPIC, new ConsumerPosition(PollingModeDTO.TAILING, NON_EXISTING_TOPIC, List.of(), null, null), null, null, 1, "String", "String")) .expectError(TopicNotFoundException.class) @@ -69,7 +69,7 @@ void maskingAppliedOnConfiguredClusters() throws Exception { producer.send(testTopic, "message1"); producer.send(testTopic, "message2").get(); - Flux msgsFlux = messagesService.loadMessagesV2( + Flux msgsFlux = messagesService.loadMessages( cluster, testTopic, new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null), diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java index 2bdb630b204..92d896a2967 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java @@ -10,6 +10,7 @@ import com.provectus.kafka.ui.AbstractIntegrationTest; import com.provectus.kafka.ui.emitter.BackwardRecordEmitter; +import com.provectus.kafka.ui.emitter.Cursor; import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; import com.provectus.kafka.ui.emitter.MessagesProcessing; import com.provectus.kafka.ui.emitter.PollingSettings; @@ -45,6 +46,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import 
org.mockito.Mockito; import reactor.core.publisher.Flux; import reactor.core.publisher.FluxSink; import reactor.test.StepVerifier; @@ -59,6 +61,7 @@ class RecordEmitterTest extends AbstractIntegrationTest { static final String EMPTY_TOPIC = TOPIC + "_empty"; static final List SENT_RECORDS = new ArrayList<>(); static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer(); + static final Cursor.Tracking CURSOR_MOCK = Mockito.mock(Cursor.Tracking.class); @BeforeAll static void generateMsgs() throws Exception { @@ -121,7 +124,8 @@ void pollNothingOnEmptyTopic() { this::createConsumer, new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), + CURSOR_MOCK ); var backwardEmitter = new BackwardRecordEmitter( @@ -129,7 +133,8 @@ void pollNothingOnEmptyTopic() { new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), 100, createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), + CURSOR_MOCK ); StepVerifier.create(Flux.create(forwardEmitter)) @@ -150,8 +155,9 @@ void pollFullTopicFromBeginning() { var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null), - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); var backwardEmitter = new BackwardRecordEmitter( @@ -159,7 +165,8 @@ void pollFullTopicFromBeginning() { new ConsumerPosition(LATEST, TOPIC, List.of(), null, null), PARTITIONS * MSGS_PER_PARTITION, createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), + CURSOR_MOCK ); List expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList()); @@ -181,7 +188,8 @@ void pollWithOffsets() { new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, new Offsets(null, targetOffsets)), createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), + CURSOR_MOCK ); var backwardEmitter = new BackwardRecordEmitter( @@ -190,7 +198,8 @@ void pollWithOffsets() { new Offsets(null, targetOffsets)), PARTITIONS * MSGS_PER_PARTITION, createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), + CURSOR_MOCK ); var expectedValues = SENT_RECORDS.stream() @@ -218,7 +227,8 @@ void pollWithTimestamps() { this::createConsumer, new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), + CURSOR_MOCK ); expectEmitter( @@ -234,7 +244,8 @@ void pollWithTimestamps() { new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), PARTITIONS * MSGS_PER_PARTITION, createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), + CURSOR_MOCK ); expectEmitter( @@ -260,7 +271,8 @@ void backwardEmitterSeekToEnd() { new Offsets(null, targetOffsets)), numMessages, createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), + CURSOR_MOCK ); var expectedValues = SENT_RECORDS.stream() @@ -286,7 +298,8 @@ void backwardEmitterSeekToBegin() { new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null, new Offsets(null, offsets)), 100, createMessagesProcessing(), - PollingSettings.createDefault() + PollingSettings.createDefault(), 
+        CURSOR_MOCK
     );

     expectEmitter(backwardEmitter,
diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
index c8a8adef2f7..a9639ca3893 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
@@ -497,7 +497,7 @@ public void doAssert(Consumer<TopicMessageDTO> msgAssert) {
     String topic = createTopicAndCreateSchemas();
     try {
       messagesService.sendMessage(targetCluster, topic, msgToSend).block();
-      TopicMessageDTO polled = messagesService.loadMessagesV2(
+      TopicMessageDTO polled = messagesService.loadMessages(
           targetCluster,
           topic,
           new ConsumerPosition(PollingModeDTO.EARLIEST, topic, List.of(), null, null),
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index ea94d925d99..319be5cd53e 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -792,55 +792,61 @@ paths:
           required: true
           schema:
             type: string
-        - name: m
+        - name: mode
           in: query
           description: Messages polling mode
           required: true
           schema:
             $ref: "#/components/schemas/PollingMode"
-        - name: p
+        - name: partitions
           in: query
           schema:
             type: array
           description: List of target partitions (all partitions if not provided)
           items:
             type: integer
-        - name: lim
+        - name: limit
           in: query
           description: Max number of messages that can be returned
           schema:
             type: integer
-        - name: q
+        - name: stringFilter
           in: query
          description: query string for 'contains' string filtering
           schema:
             type: string
-        - name: fid
+        - name: smartFilterId
           in: query
           description: filter id that was registered beforehand
           schema:
             type: string
-        - name: offs
+        - name: offset
           in: query
-          description: partition offsets to read from / to. Format is "p1:offset1,p2:offset2,...".
+          description: message offset to read from / to
           schema:
-            type: string
+            type: integer
+            format: int64
-        - name: ts
+        - name: timestamp
           in: query
           description: timestamp (in ms) to read from / to
           schema:
             type: integer
             format: int64
-        - name: ks
+        - name: keySerde
           in: query
           description: "Serde that should be used for deserialization. Will be chosen automatically if not set."
           schema:
             type: string
-        - name: vs
+        - name: valueSerde
           in: query
           description: "Serde that should be used for deserialization. Will be chosen automatically if not set."
          schema:
            type: string
+        - name: cursor
+          in: query
+          description: "id of the cursor for pagination"
+          schema:
+            type: string
      responses:
        200:
          description: OK
          content:
@@ -2644,10 +2650,8 @@ components:
     TopicMessageNextPageCursor:
       type: object
       properties:
-        offsetsString:
+        id:
           type: string
-        pollingMode:
-          $ref: "#/components/schemas/PollingMode"

     TopicMessage:
       type: object

From 153745e9e871eb947c6c9ae7bd519937fd874a49 Mon Sep 17 00:00:00 2001
From: iliax
Date: Mon, 24 Apr 2023 23:21:32 +0400
Subject: [PATCH 10/16] new tests

---
 .../ui/emitter/ForwardRecordEmitter.java      |  22 ++-
 .../kafka/ui/service/MessagesService.java     |   1 +
 .../ui/service/PollingCursorsStorage.java     |   6 +
 .../kafka/ui/emitter/CursorTest.java          | 181 ++++++++++++++++++
 .../kafka/ui/service/MessagesServiceTest.java | 108 +++++++++--
 5 files changed, 288 insertions(+), 30 deletions(-)
 create mode 100644 kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
index 3aedb9f6d5b..cd73aa08450 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
@@ -4,10 +4,10 @@
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.errors.InterruptException;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
@@ -49,15 +49,21 @@ public void accept(FluxSink<TopicMessageEventDTO> sink) {
         sendPhase(sink, "Polling");
         ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
         emptyPolls.count(records);
-        trackOffsetsAfterPoll(consumer);
-        log.debug("{} records polled", records.count());
-        for (ConsumerRecord<Bytes, Bytes> msg : records) {
-          sendMessage(sink, msg);
+        for (TopicPartition tp : records.partitions()) {
+          for (ConsumerRecord<Bytes, Bytes> record : records.records(tp)) {
+            // if the send limit is reached, we skip the remaining polled records
+            // (and don't track their offsets) - they will be polled again on the
+            // next page, via the registered cursor
+            if (!isSendLimitReached()) {
+              sendMessage(sink, record);
+              cursor.trackOffset(tp, record.offset() + 1);
+            }
+          }
         }
       }
-      sendFinishStatsAndCompleteSink(sink, seekOperations.assignedPartitionsFullyPolled() ? null : cursor);
+      sendFinishStatsAndCompleteSink(sink, !isSendLimitReached() ?
null : cursor); log.debug("Polling finished"); } catch (InterruptException kafkaInterruptException) { log.debug("Polling finished due to thread interruption"); @@ -68,8 +74,4 @@ public void accept(FluxSink sink) { } } - private void trackOffsetsAfterPoll(Consumer consumer) { - consumer.assignment().forEach(tp -> cursor.trackOffset(tp, consumer.position(tp))); - } - } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 43ee4f231ac..b83da33bd74 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -56,6 +56,7 @@ public class MessagesService { private static final int DEFAULT_MAX_PAGE_SIZE = 500; private static final int DEFAULT_PAGE_SIZE = 100; + // limiting UI messages rate to 20/sec in tailing mode private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java index a789c2afbe2..30f6ef92e03 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java @@ -1,8 +1,10 @@ package com.provectus.kafka.ui.service; +import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.provectus.kafka.ui.emitter.Cursor; +import java.util.Map; import java.util.Optional; import org.apache.commons.lang3.RandomStringUtils; @@ -22,4 +24,8 @@ public String register(Cursor cursor) { return id; } + @VisibleForTesting + public Map asMap() { + return cursorsCache.asMap(); + } } diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java new file mode 100644 index 00000000000..e36be49320d --- /dev/null +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java @@ -0,0 +1,181 @@ +package com.provectus.kafka.ui.emitter; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.provectus.kafka.ui.AbstractIntegrationTest; +import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.PollingModeDTO; +import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.producer.KafkaTestProducer; +import com.provectus.kafka.ui.serde.api.Serde; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; +import com.provectus.kafka.ui.serdes.PropertyResolverImpl; +import com.provectus.kafka.ui.serdes.builtin.StringSerde; +import com.provectus.kafka.ui.service.PollingCursorsStorage; +import java.io.Serializable; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.UUID; +import java.util.function.Consumer; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.serialization.BytesDeserializer; +import org.apache.kafka.common.utils.Bytes; +import org.junit.jupiter.api.AfterAll; +import 
org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; + + +public class CursorTest extends AbstractIntegrationTest { + + static final String TOPIC = CursorTest.class.getSimpleName() + "_" + UUID.randomUUID(); + + static final int MSGS_IN_PARTITION = 20; + static final int PAGE_SIZE = 11; + + @BeforeAll + static void setup() { + createTopic(new NewTopic(TOPIC, 1, (short) 1)); + try (var producer = KafkaTestProducer.forKafka(kafka)) { + for (int i = 0; i < MSGS_IN_PARTITION; i++) { + producer.send(new ProducerRecord<>(TOPIC, "msg_" + i)); + } + } + } + + @AfterAll + static void cleanup() { + deleteTopic(TOPIC); + } + + @Test + void backwardEmitter() { + var cursorsStorage = new PollingCursorsStorage(); + var consumerPosition = new ConsumerPosition(PollingModeDTO.LATEST, TOPIC, List.of(), null, null); + + var cursor = new Cursor.Tracking( + createRecordsDeserializer(), + consumerPosition, + m -> true, + PAGE_SIZE, + cursorsStorage::register + ); + + var emitter = createBackwardEmitter(consumerPosition, cursor); + verifyMessagesEmitted(emitter); + assertCursor( + cursorsStorage, + PollingModeDTO.TO_OFFSET, + offsets -> assertThat(offsets) + .hasSize(1) + .containsEntry(new TopicPartition(TOPIC, 0), 9L) + ); + } + + @Test + void forwardEmitter() { + var cursorsStorage = new PollingCursorsStorage(); + var consumerPosition = new ConsumerPosition(PollingModeDTO.EARLIEST, TOPIC, List.of(), null, null); + + var cursor = new Cursor.Tracking( + createRecordsDeserializer(), + consumerPosition, + m -> true, + PAGE_SIZE, + cursorsStorage::register + ); + + var emitter = createForwardEmitter(consumerPosition, cursor); + verifyMessagesEmitted(emitter); + assertCursor( + cursorsStorage, + PollingModeDTO.FROM_OFFSET, + offsets -> assertThat(offsets) + .hasSize(1) + .containsEntry(new TopicPartition(TOPIC, 0), 11L) + ); + } + + private void assertCursor(PollingCursorsStorage storage, + PollingModeDTO expectedMode, + Consumer> offsetsAssert) { + Cursor registeredCursor = storage.asMap().values().stream().findFirst().orElse(null); + assertThat(registeredCursor).isNotNull(); + assertThat(registeredCursor.limit()).isEqualTo(PAGE_SIZE); + assertThat(registeredCursor.deserializer()).isNotNull(); + assertThat(registeredCursor.filter()).isNotNull(); + + var cursorPosition = registeredCursor.consumerPosition(); + assertThat(cursorPosition).isNotNull(); + assertThat(cursorPosition.topic()).isEqualTo(TOPIC); + assertThat(cursorPosition.partitions()).isEqualTo(List.of()); + assertThat(cursorPosition.pollingMode()).isEqualTo(expectedMode); + + offsetsAssert.accept(cursorPosition.offsets().tpOffsets()); + } + + private void verifyMessagesEmitted(AbstractEmitter emitter) { + StepVerifier.create( + Flux.create(emitter) + .filter(e -> e.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) + .map(e -> e.getMessage().getContent()) + ) + .expectNextCount(PAGE_SIZE) + .verifyComplete(); + } + + private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position, Cursor.Tracking cursor) { + return new BackwardRecordEmitter( + this::createConsumer, + position, + PAGE_SIZE, + new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE), + PollingSettings.createDefault(), + cursor + ); + } + + private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position, Cursor.Tracking cursor) { + return new ForwardRecordEmitter( + this::createConsumer, + position, + new MessagesProcessing(createRecordsDeserializer(), m -> 
true, PAGE_SIZE), + PollingSettings.createDefault(), + cursor + ); + } + + private KafkaConsumer createConsumer() { + final Map map = Map.of( + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(), + ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(), + ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1, // to check multiple polls + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class, + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class + ); + Properties props = new Properties(); + props.putAll(map); + return new KafkaConsumer<>(props); + } + + private static ConsumerRecordDeserializer createRecordsDeserializer() { + Serde s = new StringSerde(); + s.configure(PropertyResolverImpl.empty(), PropertyResolverImpl.empty(), PropertyResolverImpl.empty()); + return new ConsumerRecordDeserializer( + StringSerde.name(), + s.deserializer(null, Serde.Target.KEY), + StringSerde.name(), + s.deserializer(null, Serde.Target.VALUE), + StringSerde.name(), + s.deserializer(null, Serde.Target.KEY), + s.deserializer(null, Serde.Target.VALUE) + ); + } + +} diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java index fbaa0748bdb..fcc94fd5df3 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java @@ -1,5 +1,7 @@ package com.provectus.kafka.ui.service; +import static org.assertj.core.api.Assertions.assertThat; + import com.provectus.kafka.ui.AbstractIntegrationTest; import com.provectus.kafka.ui.exception.TopicNotFoundException; import com.provectus.kafka.ui.model.ConsumerPosition; @@ -10,11 +12,17 @@ import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; import com.provectus.kafka.ui.serdes.builtin.StringSerde; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; import org.apache.kafka.clients.admin.NewTopic; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import org.springframework.beans.factory.annotation.Autowired; import reactor.core.publisher.Flux; import reactor.test.StepVerifier; @@ -29,6 +37,8 @@ class MessagesServiceTest extends AbstractIntegrationTest { KafkaCluster cluster; + Set createdTopics = new HashSet<>(); + @BeforeEach void init() { cluster = applicationContext @@ -37,6 +47,11 @@ void init() { .get(); } + @AfterEach + void deleteCreatedTopics() { + createdTopics.forEach(MessagesServiceTest::deleteTopic); + } + @Test void deleteTopicMessagesReturnsExceptionWhenTopicNotFound() { StepVerifier.create(messagesService.deleteTopicMessages(cluster, NON_EXISTING_TOPIC, List.of())) @@ -64,31 +79,84 @@ void loadMessagesReturnsExceptionWhenTopicNotFound() { @Test void maskingAppliedOnConfiguredClusters() throws Exception { String testTopic = MASKED_TOPICS_PREFIX + UUID.randomUUID(); + createTopicWithCleanup(new NewTopic(testTopic, 1, (short) 1)); + try (var producer = KafkaTestProducer.forKafka(kafka)) { - createTopic(new NewTopic(testTopic, 1, (short) 1)); producer.send(testTopic, "message1"); producer.send(testTopic, "message2").get(); + } + + Flux msgsFlux = 
messagesService.loadMessages( + cluster, + testTopic, + new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null), + null, + null, + 100, + StringSerde.name(), + StringSerde.name() + ).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) + .map(TopicMessageEventDTO::getMessage); + + // both messages should be masked + StepVerifier.create(msgsFlux) + .expectNextMatches(msg -> msg.getContent().equals("***")) + .expectNextMatches(msg -> msg.getContent().equals("***")) + .verifyComplete(); + } + + @ParameterizedTest + @CsvSource({"EARLIEST", "LATEST"}) + void cursorIsRegisteredAfterPollingIsDoneAndCanBeUsedForNextPagePolling(PollingModeDTO mode) { + String testTopic = MessagesServiceTest.class.getSimpleName() + UUID.randomUUID(); + createTopicWithCleanup(new NewTopic(testTopic, 5, (short) 1)); - Flux msgsFlux = messagesService.loadMessages( - cluster, - testTopic, - new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null), - null, - null, - 100, - StringSerde.name(), - StringSerde.name() - ).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) - .map(TopicMessageEventDTO::getMessage); - - // both messages should be masked - StepVerifier.create(msgsFlux) - .expectNextMatches(msg -> msg.getContent().equals("***")) - .expectNextMatches(msg -> msg.getContent().equals("***")) - .verifyComplete(); - } finally { - deleteTopic(testTopic); + int msgsToGenerate = 100; + int pageSize = (msgsToGenerate / 2) + 1; + + try (var producer = KafkaTestProducer.forKafka(kafka)) { + for (int i = 0; i < msgsToGenerate; i++) { + producer.send(testTopic, "message_" + i); + } } + + var cursorIdCatcher = new AtomicReference(); + Flux msgsFlux = messagesService.loadMessages( + cluster, testTopic, + new ConsumerPosition(mode, testTopic, List.of(), null, null), + null, null, pageSize, StringSerde.name(), StringSerde.name()) + .doOnNext(evt -> { + if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) { + assertThat(evt.getCursor()).isNotNull(); + cursorIdCatcher.set(evt.getCursor().getId()); + } + }) + .filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) + .map(evt -> evt.getMessage().getContent()); + + StepVerifier.create(msgsFlux) + .expectNextCount(pageSize) + .verifyComplete(); + + assertThat(cursorIdCatcher.get()).isNotNull(); + + Flux remainingMsgs = messagesService.loadMessages(cluster, testTopic, cursorIdCatcher.get()) + .doOnNext(evt -> { + if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) { + assertThat(evt.getCursor()).isNull(); + } + }) + .filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) + .map(evt -> evt.getMessage().getContent()); + + StepVerifier.create(remainingMsgs) + .expectNextCount(msgsToGenerate - pageSize) + .verifyComplete(); + } + + private void createTopicWithCleanup(NewTopic newTopic) { + createTopic(newTopic); + createdTopics.add(newTopic.name()); } } From f05e8bbae3d802dfa7754b32d3785199b067ef1d Mon Sep 17 00:00:00 2001 From: iliax Date: Mon, 24 Apr 2023 23:40:49 +0400 Subject: [PATCH 11/16] new tests --- .../kafka/ui/emitter/CursorTest.java | 105 +++++++++++------- 1 file changed, 63 insertions(+), 42 deletions(-) diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java index e36be49320d..cd849bcfd92 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java 
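What this revision is testing, condensed from the test bodies in the hunks below (helper names are the ones these hunks introduce): the cursor registered after the first page is fed into a second emitter, which must return exactly the remaining records. Roughly:

    // page 1: a fresh emitter polls PAGE_SIZE records and registers a cursor
    var emitter = createForwardEmitter(consumerPosition);
    emitMessages(emitter, PAGE_SIZE);
    Cursor cursor = cursorsStorage.asMap().values().iterator().next();

    // page 2: an emitter rebuilt from the cursor's position, filter and limit
    // must emit exactly the remaining records, without registering a new cursor
    emitter = createForwardEmitterWithCursor(cursor);
    emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
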
@@ -31,13 +31,14 @@
 import reactor.core.publisher.Flux;
 import reactor.test.StepVerifier;

-
-public class CursorTest extends AbstractIntegrationTest {
+class CursorTest extends AbstractIntegrationTest {

   static final String TOPIC = CursorTest.class.getSimpleName() + "_" + UUID.randomUUID();

   static final int MSGS_IN_PARTITION = 20;
-  static final int PAGE_SIZE = 11;
+  static final int PAGE_SIZE = (MSGS_IN_PARTITION / 2) + 1; // to poll the full data set in 2 iterations
+
+  final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();

   @BeforeAll
   static void setup() {
@@ -56,56 +56,45 @@ static void cleanup() {

   @Test
   void backwardEmitter() {
-    var cursorsStorage = new PollingCursorsStorage();
     var consumerPosition = new ConsumerPosition(PollingModeDTO.LATEST, TOPIC, List.of(), null, null);
-
-    var cursor = new Cursor.Tracking(
-        createRecordsDeserializer(),
-        consumerPosition,
-        m -> true,
-        PAGE_SIZE,
-        cursorsStorage::register
-    );
-
-    var emitter = createBackwardEmitter(consumerPosition, cursor);
-    verifyMessagesEmitted(emitter);
-    assertCursor(
-        cursorsStorage,
+    var emitter = createBackwardEmitter(consumerPosition);
+    emitMessages(emitter, PAGE_SIZE);
+    var cursor = assertCursor(
         PollingModeDTO.TO_OFFSET,
         offsets -> assertThat(offsets)
             .hasSize(1)
             .containsEntry(new TopicPartition(TOPIC, 0), 9L)
     );
+
+    // polling remaining records using the registered cursor
+    emitter = createBackwardEmitterWithCursor(cursor);
+    emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
+    // checking that no new cursors were registered
+    assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor);
   }

   @Test
   void forwardEmitter() {
-    var cursorsStorage = new PollingCursorsStorage();
     var consumerPosition = new ConsumerPosition(PollingModeDTO.EARLIEST, TOPIC, List.of(), null, null);
-
-    var cursor = new Cursor.Tracking(
-        createRecordsDeserializer(),
-        consumerPosition,
-        m -> true,
-        PAGE_SIZE,
-        cursorsStorage::register
-    );
-
-    var emitter = createForwardEmitter(consumerPosition, cursor);
-    verifyMessagesEmitted(emitter);
-    assertCursor(
-        cursorsStorage,
+    var emitter = createForwardEmitter(consumerPosition);
+    emitMessages(emitter, PAGE_SIZE);
+    var cursor = assertCursor(
         PollingModeDTO.FROM_OFFSET,
         offsets -> assertThat(offsets)
             .hasSize(1)
             .containsEntry(new TopicPartition(TOPIC, 0), 11L)
     );
+
+    // polling remaining records using the registered cursor
+    emitter = createForwardEmitterWithCursor(cursor);
+    emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
+    // checking that no new cursors were registered
+    assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor);
   }

-  private void assertCursor(PollingCursorsStorage storage,
-                            PollingModeDTO expectedMode,
-                            Consumer<Map<TopicPartition, Long>> offsetsAssert) {
-    Cursor registeredCursor = storage.asMap().values().stream().findFirst().orElse(null);
+  private Cursor assertCursor(PollingModeDTO expectedMode,
+                              Consumer<Map<TopicPartition, Long>> offsetsAssert) {
+    Cursor registeredCursor = cursorsStorage.asMap().values().stream().findFirst().orElse(null);
     assertThat(registeredCursor).isNotNull();
     assertThat(registeredCursor.limit()).isEqualTo(PAGE_SIZE);
     assertThat(registeredCursor.deserializer()).isNotNull();
     assertThat(registeredCursor.filter()).isNotNull();
@@ -118,36 +107,68 @@ private void assertCursor(PollingCursorsStorage storage,
     assertThat(cursorPosition.pollingMode()).isEqualTo(expectedMode);

     offsetsAssert.accept(cursorPosition.offsets().tpOffsets());
+    return registeredCursor;
   }

-  private void verifyMessagesEmitted(AbstractEmitter emitter) {
+  private void emitMessages(AbstractEmitter emitter, int expectedCnt) {
     StepVerifier.create(
         Flux.create(emitter)
.filter(e -> e.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) .map(e -> e.getMessage().getContent()) ) - .expectNextCount(PAGE_SIZE) + .expectNextCount(expectedCnt) .verifyComplete(); } - private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position, Cursor.Tracking cursor) { + private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position) { return new BackwardRecordEmitter( this::createConsumer, position, PAGE_SIZE, new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE), PollingSettings.createDefault(), - cursor + createCursor(position) ); } - private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position, Cursor.Tracking cursor) { + private BackwardRecordEmitter createBackwardEmitterWithCursor(Cursor cursor) { + return new BackwardRecordEmitter( + this::createConsumer, + cursor.consumerPosition(), + cursor.limit(), + new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE), + PollingSettings.createDefault(), + createCursor(cursor.consumerPosition()) + ); + } + + private ForwardRecordEmitter createForwardEmitterWithCursor(Cursor cursor) { + return new ForwardRecordEmitter( + this::createConsumer, + cursor.consumerPosition(), + new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE), + PollingSettings.createDefault(), + createCursor(cursor.consumerPosition()) + ); + } + + private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position) { return new ForwardRecordEmitter( this::createConsumer, position, new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE), PollingSettings.createDefault(), - cursor + createCursor(position) + ); + } + + private Cursor.Tracking createCursor(ConsumerPosition position) { + return new Cursor.Tracking( + createRecordsDeserializer(), + position, + m -> true, + PAGE_SIZE, + cursorsStorage::register ); } From 911bbc8684769da09f10336e677c2ae6570c2521 Mon Sep 17 00:00:00 2001 From: iliax Date: Mon, 24 Apr 2023 23:54:14 +0400 Subject: [PATCH 12/16] minor improvements --- .../provectus/kafka/ui/emitter/BackwardRecordEmitter.java | 7 +------ .../com/provectus/kafka/ui/service/MessagesService.java | 2 +- .../provectus/kafka/ui/service/PollingCursorsStorage.java | 4 +++- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java index ceb77d3b54e..4b017b41431 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java @@ -78,12 +78,7 @@ public void accept(FluxSink sink) { log.debug("sink is cancelled after partitions poll iteration"); } } - sendFinishStatsAndCompleteSink( - sink, - readUntilOffsets.isEmpty() - ? null - : cursor - ); + sendFinishStatsAndCompleteSink(sink, readUntilOffsets.isEmpty() ? 
null : cursor);
       log.debug("Polling finished");
     } catch (InterruptException kafkaInterruptException) {
       log.debug("Polling finished due to thread interruption");
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
index b83da33bd74..09007babdeb 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
@@ -67,7 +67,7 @@ public class MessagesService {
   private final int defaultPageSize;

   private final Cache<String, Predicate<TopicMessageDTO>> registeredFilters = CacheBuilder.newBuilder()
-      .maximumSize(5_000)
+      .maximumSize(PollingCursorsStorage.MAX_SIZE)
       .build();

   private final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java
index 30f6ef92e03..654a6dd4bee 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java
@@ -10,8 +10,10 @@

 public class PollingCursorsStorage {

+  public static final int MAX_SIZE = 10_000;
+
   private final Cache<String, Cursor> cursorsCache = CacheBuilder.newBuilder()
-      .maximumSize(10_000)
+      .maximumSize(MAX_SIZE)
       .build();

From b5a52fc4325172e36a9a1f5da61aeda0dd0d390b Mon Sep 17 00:00:00 2001
From: iliax
Date: Tue, 11 Jul 2023 12:03:32 +0400
Subject: [PATCH 13/16] PR comments fix

---
 .../java/com/provectus/kafka/ui/model/ConsumerPosition.java | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
index 6d09b20b3a5..51f4e51f7c6 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
@@ -1,5 +1,6 @@
 package com.provectus.kafka.ui.model;

+import com.google.common.base.Preconditions;
 import com.provectus.kafka.ui.exception.ValidationException;
 import java.util.List;
 import java.util.Map;
@@ -14,9 +15,12 @@ public record ConsumerPosition(PollingModeDTO pollingMode,
                                @Nullable Long timestamp,
                                @Nullable Offsets offsets) {

-  // one of properties will be null
   public record Offsets(@Nullable Long offset, // should be applied to all partitions
                         @Nullable Map<TopicPartition, Long> tpOffsets) {
+    public Offsets {
+      // exactly one of the properties must be set
+      Preconditions.checkArgument((offset == null && tpOffsets != null) || (offset != null && tpOffsets == null));
+    }
   }

   public static ConsumerPosition create(PollingModeDTO pollingMode,

From e04928ca0d421aa29b6cb6ed17feb4af436cd0d9 Mon Sep 17 00:00:00 2001
From: iliax
Date: Tue, 1 Aug 2023 15:51:37 +0400
Subject: [PATCH 14/16] master merge

---
 .../ui/controller/MessagesController.java     |  9 ++-
 .../kafka/ui/service/MessagesService.java     | 70 ++++++++++++++++---
 .../kafka/ui/service/MessagesServiceTest.java |  3 +
 3 files changed, 69 insertions(+), 13 deletions(-)

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
index f2c92787b3c..fbd0a0ded4e 100644
---
a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -108,11 +108,12 @@ public Mono>> getTopicMessagesV2(Strin String valueSerde, String cursor, ServerWebExchange exchange) { - final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + var context = AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_READ) - .build()); + .operationName("getTopicMessages") + .build(); Flux messagesFlux; if (cursor != null) { @@ -129,7 +130,9 @@ public Mono>> getTopicMessagesV2(Strin valueSerde ); } - return validateAccess.then(Mono.just(ResponseEntity.ok(messagesFlux))); + return accessControlService.validateAccess(context) + .then(Mono.just(ResponseEntity.ok(messagesFlux))) + .doOnEach(sig -> auditService.audit(context, sig)); } @Override diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index b2457a1c0a5..194cd81300f 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -17,7 +17,11 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; +import com.provectus.kafka.ui.model.MessageFilterTypeDTO; import com.provectus.kafka.ui.model.PollingModeDTO; +import com.provectus.kafka.ui.model.SeekDirectionDTO; +import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; +import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serde.api.Serde; @@ -100,10 +104,7 @@ private Mono withExistingTopic(KafkaCluster cluster, String to public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) { Predicate predicate; try { - predicate = MessageFilters.createMsgFilter( - execData.getFilterCode(), - MessageFilterTypeDTO.GROOVY_SCRIPT - ); + predicate = MessageFilters.groovyScriptFilter(execData.getFilterCode()); } catch (Exception e) { log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e); return new SmartFilterTestExecutionResultDTO() @@ -211,18 +212,47 @@ public static KafkaProducer createProducer(KafkaCluster cluster, return new KafkaProducer<>(properties); } - public Flux loadMessages(KafkaCluster cluster, String topic, + public Flux loadMessages(KafkaCluster cluster, + String topic, ConsumerPosition consumerPosition, - @Nullable String query, - MessageFilterTypeDTO filterQueryType, - @Nullable Integer pageSize, - SeekDirectionDTO seekDirection, + @Nullable String containsStringFilter, + @Nullable String filterId, + @Nullable Integer limit, @Nullable String keySerde, @Nullable String valueSerde) { + return loadMessages( + cluster, + topic, + deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde), + consumerPosition, + getMsgFilter(containsStringFilter, filterId), + fixPageSize(limit) + ); + } + + public Flux loadMessages(KafkaCluster cluster, String topic, String cursorId) { + Cursor cursor = cursorsStorage.getCursor(cursorId) + .orElseThrow(() -> new ValidationException("Next page cursor not found. 
Maybe it was evicted from cache.")); + return loadMessages( + cluster, + topic, + cursor.deserializer(), + cursor.consumerPosition(), + cursor.filter(), + cursor.limit() + ); + } + + private Flux loadMessages(KafkaCluster cluster, + String topic, + ConsumerRecordDeserializer deserializer, + ConsumerPosition consumerPosition, + Predicate filter, + int limit) { return withExistingTopic(cluster, topic) .flux() .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, fixPageSize(limit))); + .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, limit)); } private Flux loadMessagesImpl(KafkaCluster cluster, @@ -265,6 +295,12 @@ private Flux loadMessagesImpl(KafkaCluster cluster, .map(throttleUiPublish(consumerPosition.pollingMode())); } + private int fixPageSize(@Nullable Integer pageSize) { + return Optional.ofNullable(pageSize) + .filter(ps -> ps > 0 && ps <= maxPageSize) + .orElse(defaultPageSize); + } + public String registerMessageFilter(String groovyCode) { String saltedCode = groovyCode + SALT_FOR_HASHING; String filterId = Hashing.sha256() @@ -277,6 +313,20 @@ public String registerMessageFilter(String groovyCode) { return filterId; } + private UnaryOperator getDataMasker(KafkaCluster cluster, String topicName) { + var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY); + var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE); + return evt -> { + if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) { + return evt; + } + return evt.message( + evt.getMessage() + .key(keyMasker.apply(evt.getMessage().getKey())) + .content(valMasker.apply(evt.getMessage().getContent()))); + }; + } + private Predicate getMsgFilter(@Nullable String containsStrFilter, @Nullable String smartFilterId) { Predicate messageFilter = MessageFilters.noop(); diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java index fdb3834746a..2f7d1868807 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java @@ -1,5 +1,6 @@ package com.provectus.kafka.ui.service; +import static com.provectus.kafka.ui.service.MessagesService.execSmartFilterTest; import static org.assertj.core.api.Assertions.assertThat; import com.provectus.kafka.ui.AbstractIntegrationTest; @@ -8,12 +9,14 @@ import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; import com.provectus.kafka.ui.model.PollingModeDTO; +import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; import com.provectus.kafka.ui.serdes.builtin.StringSerde; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicReference; From c845786ba96d41ae1d41b7da035b41e68ec3d772 Mon Sep 17 00:00:00 2001 From: iliax Date: Wed, 2 Aug 2023 12:47:42 +0400 Subject: [PATCH 15/16] master merge --- .../kafka/ui/emitter/AbstractEmitter.java | 1 + .../ui/emitter/ForwardRecordEmitter.java | 2 -- .../kafka/ui/emitter/PolledRecords.java | 5 +++++ 
.../kafka/ui/emitter/TailingEmitter.java | 1 + .../kafka/ui/emitter/CursorTest.java | 20 ++++++------------- 5 files changed, 13 insertions(+), 16 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java index 56912f8c80a..d8d5b7482d8 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java @@ -1,6 +1,7 @@ package com.provectus.kafka.ui.emitter; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import jakarta.annotation.Nullable; import java.time.Duration; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.utils.Bytes; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java index bbb89a058d7..d76d265fcb5 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java @@ -5,8 +5,6 @@ import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.ConsumerRecords; -import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.utils.Bytes; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java index bc6bd95d5f6..94169f1b634 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java @@ -3,6 +3,7 @@ import java.time.Duration; import java.util.Iterator; import java.util.List; +import java.util.Set; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.TopicPartition; @@ -32,6 +33,10 @@ public Iterator> iterator() { return records.iterator(); } + public Set partitions() { + return records.partitions(); + } + private static int calculatePolledRecSize(Iterable> recs) { int polledBytes = 0; for (ConsumerRecord rec : recs) { diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java index df0027fc388..024e8ba399f 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java @@ -2,6 +2,7 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.common.errors.InterruptException; import reactor.core.publisher.FluxSink; diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java index cd849bcfd92..06dffac83f4 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java +++ 
b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java @@ -12,7 +12,7 @@ import com.provectus.kafka.ui.serdes.PropertyResolverImpl; import com.provectus.kafka.ui.serdes.builtin.StringSerde; import com.provectus.kafka.ui.service.PollingCursorsStorage; -import java.io.Serializable; +import com.provectus.kafka.ui.util.ApplicationMetrics; import java.util.List; import java.util.Map; import java.util.Properties; @@ -20,11 +20,8 @@ import java.util.function.Consumer; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.serialization.BytesDeserializer; -import org.apache.kafka.common.utils.Bytes; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -172,17 +169,12 @@ private Cursor.Tracking createCursor(ConsumerPosition position) { ); } - private KafkaConsumer createConsumer() { - final Map map = Map.of( - ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(), - ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(), - ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1, // to check multiple polls - ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class, - ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class - ); + private EnhancedConsumer createConsumer() { Properties props = new Properties(); - props.putAll(map); - return new KafkaConsumer<>(props); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers()); + props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1); // to check multiple polls + return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop()); } private static ConsumerRecordDeserializer createRecordsDeserializer() { From d2e6e6a50964754327a3e59b1e2df5507b61625f Mon Sep 17 00:00:00 2001 From: iliax Date: Mon, 21 Aug 2023 17:40:52 +0400 Subject: [PATCH 16/16] merged with master --- .../ui/controller/MessagesController.java | 30 ++------- .../kafka/ui/emitter/AbstractEmitter.java | 7 +- .../kafka/ui/emitter/BackwardEmitter.java | 13 ++-- .../kafka/ui/emitter/ConsumingStats.java | 5 +- .../provectus/kafka/ui/emitter/Cursor.java | 41 +++++++++--- .../kafka/ui/emitter/ForwardEmitter.java | 13 ++-- .../kafka/ui/emitter/MessagesProcessing.java | 7 +- .../kafka/ui/emitter/RangePollingEmitter.java | 13 ++-- .../kafka/ui/emitter/SeekOperations.java | 7 +- .../kafka/ui/emitter/TailingEmitter.java | 5 +- .../kafka/ui/service/MessagesService.java | 54 ++++++++++----- .../ui/service/PollingCursorsStorage.java | 12 ++++ .../service/analyze/TopicAnalysisService.java | 6 +- .../kafka/ui/emitter/CursorTest.java | 41 ++++++------ .../kafka/ui/service/RecordEmitterTest.java | 67 +++++++------------ 15 files changed, 173 insertions(+), 148 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java index 73f26289642..709efcdc185 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -104,38 +104,18 @@ public Mono>> 
getTopicMessagesV2(Strin String valueSerde, String cursor, ServerWebExchange exchange) { - var context = AccessContext.builder() + var contextBuilder = AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_READ) - .operationName("getTopicMessages") - .build(); + .operationName("getTopicMessages"); if (auditService.isAuditTopic(getCluster(clusterName), topicName)) { contextBuilder.auditActions(AuditAction.VIEW); } - seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING; - seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD; - filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS; - - var positions = new ConsumerPosition( - seekType, - topicName, - parseSeekTo(topicName, seekType, seekTo) - ); - Mono>> job = Mono.just( - ResponseEntity.ok( - messagesService.loadMessages( - getCluster(clusterName), topicName, positions, q, filterQueryType, - limit, seekDirection, keySerde, valueSerde) - ) - ); + var accessContext = contextBuilder.build(); - var context = contextBuilder.build(); - return validateAccess(context) - .then(job) - .doOnEach(sig -> audit(context, sig)); Flux messagesFlux; if (cursor != null) { messagesFlux = messagesService.loadMessages(getCluster(clusterName), topicName, cursor); @@ -151,9 +131,9 @@ public Mono>> getTopicMessagesV2(Strin valueSerde ); } - return accessControlService.validateAccess(context) + return accessControlService.validateAccess(accessContext) .then(Mono.just(ResponseEntity.ok(messagesFlux))) - .doOnEach(sig -> auditService.audit(context, sig)); + .doOnEach(sig -> auditService.audit(accessContext, sig)); } @Override diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java index b2a154d9146..21ef0b43adb 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java @@ -1,6 +1,7 @@ package com.provectus.kafka.ui.emitter; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import jakarta.annotation.Nullable; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; @@ -25,8 +26,10 @@ protected boolean isSendLimitReached() { return messagesProcessing.limitReached(); } - protected void send(FluxSink sink, Iterable> records) { - messagesProcessing.send(sink, records); + protected void send(FluxSink sink, + Iterable> records, + @Nullable Cursor.Tracking cursor) { + messagesProcessing.send(sink, records, cursor); } protected void sendPhase(FluxSink sink, String name) { diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java index cdc45336e46..75aa21bdf83 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java @@ -18,18 +18,15 @@ public BackwardEmitter(Supplier consumerSupplier, int messagesPerPage, ConsumerRecordDeserializer deserializer, Predicate filter, - PollingSettings pollingSettings) { + PollingSettings pollingSettings, + Cursor.Tracking cursor) { super( consumerSupplier, consumerPosition, messagesPerPage, - new MessagesProcessing( - deserializer, - filter, - false, - messagesPerPage - ), - 
pollingSettings + new MessagesProcessing(deserializer, filter, false, messagesPerPage), + pollingSettings, + cursor ); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java index 0f6b6efed80..17b519434b4 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java @@ -28,8 +28,7 @@ void incFilterApplyError() { filterApplyErrors++; } - - void sendFinishEvent(FluxSink sink, int filterApplyErrors, @Nullable Cursor.Tracking cursor) { + void sendFinishEvent(FluxSink sink, @Nullable Cursor.Tracking cursor) { sink.next( new TopicMessageEventDTO() .type(TopicMessageEventDTO.TypeEnum.DONE) @@ -38,7 +37,7 @@ void sendFinishEvent(FluxSink sink, int filterApplyErrors, ? new TopicMessageNextPageCursorDTO().id(cursor.registerCursor()) : null ) - .consuming(createConsumingStats(sink, filterApplyErrors)) + .consuming(createConsumingStats()) ); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java index d78a583e829..f0fd135bacf 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java @@ -1,5 +1,7 @@ package com.provectus.kafka.ui.emitter; +import com.google.common.collect.HashBasedTable; +import com.google.common.collect.Table; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; @@ -20,32 +22,41 @@ public static class Tracking { private final ConsumerPosition originalPosition; private final Predicate filter; private final int limit; - private final Function cursorRegistry; + private final Function registerAction; - private final Map trackingOffsets = new HashMap<>(); + //topic -> partition -> offset + private final Table trackingOffsets = HashBasedTable.create(); public Tracking(ConsumerRecordDeserializer deserializer, ConsumerPosition originalPosition, Predicate filter, int limit, - Function cursorRegistry) { + Function registerAction) { this.deserializer = deserializer; this.originalPosition = originalPosition; this.filter = filter; this.limit = limit; - this.cursorRegistry = cursorRegistry; + this.registerAction = registerAction; } - void trackOffset(TopicPartition tp, long offset) { - trackingOffsets.put(tp, offset); + void trackOffset(String topic, int partition, long offset) { + trackingOffsets.put(topic, partition, offset); } - void trackOffsets(Map offsets) { - this.trackingOffsets.putAll(offsets); + void initOffsets(Map initialSeekOffsets) { + initialSeekOffsets.forEach((tp, off) -> trackOffset(tp.topic(), tp.partition(), off)); + } + + private Map getOffsetsMap(int offsetToAdd) { + Map result = new HashMap<>(); + trackingOffsets.rowMap() + .forEach((topic, partsMap) -> + partsMap.forEach((p, off) -> result.put(new TopicPartition(topic, p), off + offsetToAdd))); + return result; } String registerCursor() { - return cursorRegistry.apply( + return registerAction.apply( new Cursor( deserializer, new ConsumerPosition( @@ -57,7 +68,17 @@ String registerCursor() { originalPosition.topic(), originalPosition.partitions(), null, - new ConsumerPosition.Offsets(null, trackingOffsets) + new ConsumerPosition.Offsets( + null, + getOffsetsMap( + switch 
(originalPosition.pollingMode()) { + case TO_OFFSET, TO_TIMESTAMP, LATEST -> 0; + // when doing forward polling we need to start from latest msg's offset + 1 + case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> 1; + case TAILING -> throw new IllegalStateException(); + } + ) + ) ), filter, limit diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java index 5c915fb2e8c..6627bc45c10 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java @@ -18,18 +18,15 @@ public ForwardEmitter(Supplier consumerSupplier, int messagesPerPage, ConsumerRecordDeserializer deserializer, Predicate filter, - PollingSettings pollingSettings) { + PollingSettings pollingSettings, + Cursor.Tracking cursor) { super( consumerSupplier, consumerPosition, messagesPerPage, - new MessagesProcessing( - deserializer, - filter, - true, - messagesPerPage - ), - pollingSettings + new MessagesProcessing(deserializer, filter, true, messagesPerPage), + pollingSettings, + cursor ); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java index a4789cba4de..8b8332e0398 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java @@ -39,7 +39,9 @@ boolean limitReached() { return limit != null && sentMessages >= limit; } - void send(FluxSink sink, Iterable> polled) { + void send(FluxSink sink, + Iterable> polled, + @Nullable Cursor.Tracking cursor) { sortForSending(polled, ascendingSortBeforeSend) .forEach(rec -> { if (!limitReached() && !sink.isCancelled()) { @@ -53,6 +55,9 @@ void send(FluxSink sink, Iterable consumerSupplier; + private final Cursor.Tracking cursor; protected final ConsumerPosition consumerPosition; protected final int messagesPerPage; @@ -24,11 +25,13 @@ protected RangePollingEmitter(Supplier consumerSupplier, ConsumerPosition consumerPosition, int messagesPerPage, MessagesProcessing messagesProcessing, - PollingSettings pollingSettings) { + PollingSettings pollingSettings, + Cursor.Tracking cursor) { super(messagesProcessing, pollingSettings); this.consumerPosition = consumerPosition; this.messagesPerPage = messagesPerPage; this.consumerSupplier = consumerSupplier; + this.cursor = cursor; } protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) { @@ -46,18 +49,20 @@ public void accept(FluxSink sink) { try (EnhancedConsumer consumer = consumerSupplier.get()) { sendPhase(sink, "Consumer created"); var seekOperations = SeekOperations.create(consumer, consumerPosition); + cursor.initOffsets(seekOperations.getOffsetsForSeek()); + TreeMap pollRange = nextPollingRange(new TreeMap<>(), seekOperations); log.debug("Starting from offsets {}", pollRange); - while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) { + while (!sink.isCancelled() && !pollRange.isEmpty() && !isSendLimitReached()) { var polled = poll(consumer, sink, pollRange); - send(sink, polled); + send(sink, polled, cursor); pollRange = nextPollingRange(pollRange, seekOperations); } if (sink.isCancelled()) { log.debug("Polling finished due to sink cancellation"); } - sendFinishStatsAndCompleteSink(sink); + sendFinishStatsAndCompleteSink(sink, 
pollRange.isEmpty() ? null : cursor); log.debug("Polling finished"); } catch (InterruptException kafkaInterruptException) { log.debug("Polling finished due to thread interruption"); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java index 6897974333b..8fa2cfeb0bb 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java @@ -21,7 +21,7 @@ public class SeekOperations { private final OffsetsInfo offsetsInfo; private final Map offsetsForSeek; //only contains non-empty partitions! - static SeekOperations create(Consumer consumer, ConsumerPosition consumerPosition) { + public static SeekOperations create(Consumer consumer, ConsumerPosition consumerPosition) { OffsetsInfo offsetsInfo = consumerPosition.partitions().isEmpty() ? new OffsetsInfo(consumer, consumerPosition.topic()) : new OffsetsInfo(consumer, consumerPosition.partitions()); @@ -29,7 +29,7 @@ static SeekOperations create(Consumer consumer, ConsumerPosition consumerP return new SeekOperations(consumer, offsetsInfo, offsetsToSeek); } - void assignAndSeek() { + public void assignAndSeekNonEmptyPartitions() { consumer.assign(offsetsForSeek.keySet()); offsetsForSeek.forEach(consumer::seek); } @@ -86,8 +86,7 @@ private static Map fixOffsets(OffsetsInfo offsetsInfo, if (positionOffset.offset() != null) { offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset())); } else { - requireNonNull(positionOffset.tpOffsets()); - offsets.putAll(positionOffset.tpOffsets()); + offsets.putAll(requireNonNull(positionOffset.tpOffsets())); offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java index 2d8d6093d02..dd73f743710 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java @@ -1,8 +1,11 @@ package com.provectus.kafka.ui.emitter; import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import java.util.HashMap; +import java.util.function.Predicate; import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.common.errors.InterruptException; @@ -32,7 +35,7 @@ public void accept(FluxSink sink) { while (!sink.isCancelled()) { sendPhase(sink, "Polling"); var polled = poll(sink, consumer); - send(sink, polled); + send(sink, polled, null); } sink.complete(); log.debug("Tailing finished"); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 921f09510e3..b14b885c56a 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -6,23 +6,21 @@ import com.google.common.hash.Hashing; import com.google.common.util.concurrent.RateLimiter; import com.provectus.kafka.ui.config.ClustersProperties; +import com.provectus.kafka.ui.emitter.BackwardEmitter; import 
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
index 921f09510e3..b14b885c56a 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
@@ -6,23 +6,21 @@
 import com.google.common.hash.Hashing;
 import com.google.common.util.concurrent.RateLimiter;
 import com.provectus.kafka.ui.config.ClustersProperties;
+import com.provectus.kafka.ui.emitter.BackwardEmitter;
+import com.provectus.kafka.ui.emitter.Cursor;
+import com.provectus.kafka.ui.emitter.ForwardEmitter;
 import com.provectus.kafka.ui.emitter.MessageFilters;
-import com.provectus.kafka.ui.emitter.MessagesProcessing;
 import com.provectus.kafka.ui.emitter.TailingEmitter;
 import com.provectus.kafka.ui.exception.TopicNotFoundException;
 import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
 import com.provectus.kafka.ui.model.PollingModeDTO;
-import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
 import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
 import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.serde.api.Serde;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
 import com.provectus.kafka.ui.util.SslPropertiesUtil;
@@ -250,48 +248,50 @@ private Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
     return withExistingTopic(cluster, topic)
         .flux()
         .publishOn(Schedulers.boundedElastic())
-        .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, limit));
+        .flatMap(td -> loadMessagesImpl(cluster, deserializer, consumerPosition, filter, limit));
   }

   private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
-                                                      String topic,
                                                       ConsumerRecordDeserializer deserializer,
                                                       ConsumerPosition consumerPosition,
                                                       Predicate<TopicMessageDTO> filter,
                                                       int limit) {
-    var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
-    var filter = getMsgFilter(query, filterQueryType);
     var emitter = switch (consumerPosition.pollingMode()) {
       case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
           limit,
-          processing,
+          deserializer,
+          filter,
           cluster.getPollingSettings(),
-          new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register)
+          cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
       );
       case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
-          processing,
+          limit,
+          deserializer,
+          filter,
           cluster.getPollingSettings(),
-          new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register)
+          cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
       );
       case TAILING -> new TailingEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
-          processing,
+          deserializer,
+          filter,
           cluster.getPollingSettings()
       );
     };
     return Flux.create(emitter)
-        .map(throttleUiPublish(seekDirection));
+        .map(throttleUiPublish(consumerPosition.pollingMode()));
   }

-  private Predicate<TopicMessageDTO> getMsgFilter(String query,
-                                                  MessageFilterTypeDTO filterQueryType) {
-    if (StringUtils.isEmpty(query)) {
-      return evt -> true;
+  private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String containsStrFilter,
+                                                  @Nullable String smartFilterId) {
+    Predicate<TopicMessageDTO> messageFilter = MessageFilters.noop();
+    if (containsStrFilter != null) {
+      messageFilter = messageFilter.and(MessageFilters.containsStringFilter(containsStrFilter));
     }
     if (smartFilterId != null) {
       var registered = registeredFilters.getIfPresent(smartFilterId);
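The rewritten getMsgFilter composes a single Predicate from optional parts: start from a no-op filter, then AND in the contains-string filter and the registered smart filter when present. A standalone model of that composition, using a stand-in Message record for TopicMessageDTO and a simplified containsString in place of MessageFilters.containsStringFilter (which may also match the message key):

import java.util.function.Predicate;

public class FilterCompositionDemo {
  record Message(String key, String value) {}

  static Predicate<Message> noop() {
    return m -> true;
  }

  static Predicate<Message> containsString(String s) {
    return m -> m.value() != null && m.value().contains(s);
  }

  public static void main(String[] args) {
    Predicate<Message> filter = noop();
    String containsStrFilter = "error"; // may be null in the real service
    if (containsStrFilter != null) {
      filter = filter.and(containsString(containsStrFilter));
    }
    System.out.println(filter.test(new Message("k", "fatal error"))); // true
    System.out.println(filter.test(new Message("k", "all good")));    // false
  }
}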
@@ -316,4 +316,22 @@ private UnaryOperator<TopicMessageEventDTO> throttleUiPublish(PollingModeDTO pollingMode) {
     return UnaryOperator.identity();
   }

+  private int fixPageSize(@Nullable Integer pageSize) {
+    return Optional.ofNullable(pageSize)
+        .filter(ps -> ps > 0 && ps <= maxPageSize)
+        .orElse(defaultPageSize);
+  }
+
+  public String registerMessageFilter(String groovyCode) {
+    String saltedCode = groovyCode + SALT_FOR_HASHING;
+    String filterId = Hashing.sha256()
+        .hashString(saltedCode, Charsets.UTF_8)
+        .toString()
+        .substring(0, 8);
+    if (registeredFilters.getIfPresent(filterId) == null) {
+      registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode));
+    }
+    return filterId;
+  }
+
 }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java
index 654a6dd4bee..98094b5113b 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java
@@ -4,8 +4,12 @@
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.provectus.kafka.ui.emitter.Cursor;
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.TopicMessageDTO;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import java.util.Map;
 import java.util.Optional;
+import java.util.function.Predicate;
 import org.apache.commons.lang3.RandomStringUtils;

 public class PollingCursorsStorage {
@@ -16,6 +20,14 @@ public class PollingCursorsStorage {
       .maximumSize(MAX_SIZE)
       .build();

+  public Cursor.Tracking createNewCursor(ConsumerRecordDeserializer deserializer,
+                                         ConsumerPosition originalPosition,
+                                         Predicate<TopicMessageDTO> filter,
+                                         int limit) {
+    return new Cursor.Tracking(deserializer, originalPosition, filter, limit, this::register);
+  }
+
   public Optional<Cursor> getCursor(String id) {
     return Optional.ofNullable(cursorsCache.getIfPresent(id));
   }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java
index 2523aae89ec..692c63109fa 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java
@@ -1,6 +1,6 @@
 package com.provectus.kafka.ui.service.analyze;

-import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
+import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;

 import com.provectus.kafka.ui.emitter.EnhancedConsumer;
 import com.provectus.kafka.ui.emitter.SeekOperations;
@@ -14,6 +14,7 @@
 import java.time.Duration;
 import java.time.Instant;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import lombok.RequiredArgsConstructor;
@@ -104,7 +105,8 @@ public void run() {
         consumer.partitionsFor(topicId.topicName)
             .forEach(tp -> partitionStats.put(tp.partition(), new TopicAnalysisStats()));

-        var seekOperations = SeekOperations.create(consumer, new ConsumerPosition(BEGINNING, topicId.topicName, null));
+        var seekOperations =
+            SeekOperations.create(consumer, new ConsumerPosition(EARLIEST, topicId.topicName, List.of(), null, null));
         long summaryOffsetsRange = seekOperations.summaryOffsetsRange();
         seekOperations.assignAndSeekNonEmptyPartitions();
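registerMessageFilter derives the filter id from the Groovy source itself: a salted SHA-256 truncated to 8 hex characters. Registration is therefore idempotent (the same code always maps to the same id), and the id stays short enough to pass around as a query parameter. A runnable illustration using Guava, which is already on the classpath here; the salt is a placeholder for the service's SALT_FOR_HASHING constant:

import com.google.common.base.Charsets;
import com.google.common.hash.Hashing;

public class FilterIdDemo {
  private static final String SALT = "<salt-placeholder>"; // stands in for SALT_FOR_HASHING

  static String filterId(String groovyCode) {
    return Hashing.sha256()
        .hashString(groovyCode + SALT, Charsets.UTF_8)
        .toString()
        .substring(0, 8); // 8 hex chars, as in registerMessageFilter above
  }

  public static void main(String[] args) {
    String code = "value != null && value.contains('error')";
    System.out.println(filterId(code));                        // stable 8-char id
    System.out.println(filterId(code).equals(filterId(code))); // true: idempotent
  }
}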
diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java
index 06dffac83f4..88be63fe67b 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java
@@ -117,56 +117,56 @@ private void emitMessages(AbstractEmitter emitter, int expectedCnt) {
         .verifyComplete();
   }

-  private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position) {
-    return new BackwardRecordEmitter(
+  private BackwardEmitter createBackwardEmitter(ConsumerPosition position) {
+    return new BackwardEmitter(
         this::createConsumer,
         position,
         PAGE_SIZE,
-        new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE),
+        createRecordsDeserializer(),
+        m -> true,
         PollingSettings.createDefault(),
         createCursor(position)
     );
   }

-  private BackwardRecordEmitter createBackwardEmitterWithCursor(Cursor cursor) {
-    return new BackwardRecordEmitter(
+  private BackwardEmitter createBackwardEmitterWithCursor(Cursor cursor) {
+    return new BackwardEmitter(
         this::createConsumer,
         cursor.consumerPosition(),
         cursor.limit(),
-        new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE),
+        cursor.deserializer(),
+        cursor.filter(),
         PollingSettings.createDefault(),
         createCursor(cursor.consumerPosition())
     );
   }

-  private ForwardRecordEmitter createForwardEmitterWithCursor(Cursor cursor) {
-    return new ForwardRecordEmitter(
+  private ForwardEmitter createForwardEmitterWithCursor(Cursor cursor) {
+    return new ForwardEmitter(
         this::createConsumer,
         cursor.consumerPosition(),
-        new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE),
+        cursor.limit(),
+        cursor.deserializer(),
+        cursor.filter(),
         PollingSettings.createDefault(),
         createCursor(cursor.consumerPosition())
     );
   }

-  private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position) {
-    return new ForwardRecordEmitter(
+  private ForwardEmitter createForwardEmitter(ConsumerPosition position) {
+    return new ForwardEmitter(
         this::createConsumer,
         position,
-        new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE),
+        PAGE_SIZE,
+        createRecordsDeserializer(),
+        m -> true,
         PollingSettings.createDefault(),
         createCursor(position)
     );
   }

   private Cursor.Tracking createCursor(ConsumerPosition position) {
-    return new Cursor.Tracking(
-        createRecordsDeserializer(),
-        position,
-        m -> true,
-        PAGE_SIZE,
-        cursorsStorage::register
-    );
+    return cursorsStorage.createNewCursor(createRecordsDeserializer(), position, m -> true, PAGE_SIZE);
   }

   private EnhancedConsumer createConsumer() {
@@ -187,7 +187,8 @@ private static ConsumerRecordDeserializer createRecordsDeserializer() {
         s.deserializer(null, Serde.Target.VALUE),
         StringSerde.name(),
         s.deserializer(null, Serde.Target.KEY),
-        s.deserializer(null, Serde.Target.VALUE)
+        s.deserializer(null, Serde.Target.VALUE),
+        msg -> msg
     );
   }
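Before the next test file, note why the tests above could drop createMessagesProcessing(): ForwardEmitter and BackwardEmitter now share one constructor shape and build their MessagesProcessing internally. A sketch of the shared argument order, assuming the kafka-ui classes from the hunks above are on the classpath:

import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.PollingSettings;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.function.Predicate;
import java.util.function.Supplier;

class EmitterFactorySketch {
  // consumer factory, position, page limit, deserializer, filter, settings, cursor;
  // BackwardEmitter takes the same arguments in the same order
  static ForwardEmitter forward(Supplier<EnhancedConsumer> consumers,
                                ConsumerPosition position,
                                int limit,
                                ConsumerRecordDeserializer deserializer,
                                Predicate<TopicMessageDTO> filter,
                                Cursor.Tracking cursor) {
    return new ForwardEmitter(consumers, position, limit, deserializer, filter,
        PollingSettings.createDefault(), cursor);
  }
}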
diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
index b2f73e89a19..9c26e78f2a9 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
@@ -10,6 +10,7 @@
 import com.provectus.kafka.ui.AbstractIntegrationTest;
 import com.provectus.kafka.ui.emitter.BackwardEmitter;
+import com.provectus.kafka.ui.emitter.Cursor;
 import com.provectus.kafka.ui.emitter.EnhancedConsumer;
 import com.provectus.kafka.ui.emitter.ForwardEmitter;
 import com.provectus.kafka.ui.emitter.PollingSettings;
@@ -120,13 +121,10 @@ private static ConsumerRecordDeserializer createRecordsDeserializer() {
   void pollNothingOnEmptyTopic() {
     var forwardEmitter = new ForwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(EARLIEST, EMPTY_TOPIC, null),
+        new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
         100,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
-        new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
-        createMessagesProcessing(),
         PollingSettings.createDefault(),
         CURSOR_MOCK
     );
@@ -135,12 +133,10 @@ void pollNothingOnEmptyTopic() {
         this::createConsumer,
         new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
         100,
-        createMessagesProcessing(),
-        PollingSettings.createDefault(),
-        CURSOR_MOCK,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );

     StepVerifier.create(Flux.create(forwardEmitter))
@@ -161,7 +157,9 @@ void pollFullTopicFromBeginning() {
     var forwardEmitter = new ForwardEmitter(
         this::createConsumer,
         new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null),
-        createMessagesProcessing(),
+        PARTITIONS * MSGS_PER_PARTITION,
+        RECORD_DESERIALIZER,
+        NOOP_FILTER,
         PollingSettings.createDefault(),
         CURSOR_MOCK
     );
@@ -170,12 +168,10 @@ void pollFullTopicFromBeginning() {
         this::createConsumer,
         new ConsumerPosition(LATEST, TOPIC, List.of(), null, null),
         PARTITIONS * MSGS_PER_PARTITION,
-        createMessagesProcessing(),
-        PollingSettings.createDefault(),
-        CURSOR_MOCK
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );

     List<String> expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList());
@@ -195,28 +191,23 @@ void pollWithOffsets() {
     var forwardEmitter = new ForwardEmitter(
         this::createConsumer,
         new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
-            new Offsets(null, targetOffsets)),
-        createMessagesProcessing(),
-        PollingSettings.createDefault(),
-        CURSOR_MOCK
-        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
+            new ConsumerPosition.Offsets(null, targetOffsets)),
         PARTITIONS * MSGS_PER_PARTITION,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );

     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
         new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
-            new Offsets(null, targetOffsets)),
+            new ConsumerPosition.Offsets(null, targetOffsets)),
         PARTITIONS * MSGS_PER_PARTITION,
-        createMessagesProcessing(),
-        PollingSettings.createDefault(),
-        CURSOR_MOCK
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );

     var expectedValues = SENT_RECORDS.stream()
@@ -243,7 +234,9 @@ void pollWithTimestamps() {
     var forwardEmitter = new ForwardEmitter(
         this::createConsumer,
         new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
-        createMessagesProcessing(),
+        PARTITIONS * MSGS_PER_PARTITION,
+        RECORD_DESERIALIZER,
+        NOOP_FILTER,
         PollingSettings.createDefault(),
         CURSOR_MOCK
     );
@@ -254,23 +247,16 @@ void pollWithTimestamps() {
         .filter(r -> r.getTimestamp() >= targetTimestamp)
         .map(Record::getValue)
         .collect(Collectors.toList())
-        new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
-        PARTITIONS * MSGS_PER_PARTITION,
-        RECORD_DESERIALIZER,
-        NOOP_FILTER,
-        PollingSettings.createDefault()
     );

     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
         new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
         PARTITIONS * MSGS_PER_PARTITION,
-        createMessagesProcessing(),
-        PollingSettings.createDefault(),
-        CURSOR_MOCK
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );

     expectEmitter(
@@ -293,14 +279,12 @@ void backwardEmitterSeekToEnd() {
     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
         new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
-            new Offsets(null, targetOffsets)),
+            new ConsumerPosition.Offsets(null, targetOffsets)),
         numMessages,
-        createMessagesProcessing(),
-        PollingSettings.createDefault(),
-        CURSOR_MOCK
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );

     var expectedValues = SENT_RECORDS.stream()
@@ -323,12 +307,11 @@ void backwardEmitterSeekToBegin() {
     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null, new Offsets(null, offsets)),
+        new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null,
+            new ConsumerPosition.Offsets(null, offsets)),
         100,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
-        createMessagesProcessing(),
         PollingSettings.createDefault(),
         CURSOR_MOCK
     );
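The call sites in these tests pin down the new five-argument ConsumerPosition. A reconstruction of its shape as implied by those calls and by SeekOperations.fixOffsets earlier in this patch; the accessor names pollingMode, topic, partitions, offset, and tpOffsets appear in the hunks above, while the timestamp field name is an assumption:

import com.provectus.kafka.ui.model.PollingModeDTO;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import org.apache.kafka.common.TopicPartition;

// implied by calls such as
//   new ConsumerPosition(EARLIEST, topic, List.of(), null, null)
//   new ConsumerPosition(TO_OFFSET, topic, partitions, null, new ConsumerPosition.Offsets(null, offsets))
public record ConsumerPosition(PollingModeDTO pollingMode,
                               String topic,
                               List<TopicPartition> partitions, // List.of() means all partitions
                               @Nullable Long timestamp,        // FROM/TO_TIMESTAMP modes
                               @Nullable Offsets offsets) {     // FROM/TO_OFFSET modes

  // either one offset shared by all partitions, or explicit per-partition offsets
  // (see the fixOffsets hunk in SeekOperations)
  public record Offsets(@Nullable Long offset,
                        @Nullable Map<TopicPartition, Long> tpOffsets) {}
}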