From 6709576bf30ea7c475c97cd6618e1564d54de9f0 Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 27 Sep 2023 16:43:02 +0200 Subject: [PATCH 01/27] Manage Confluent tags --- .../controllers/topic/TopicController.java | 9 +++ .../michelin/ns4kafka/models/ObjectMeta.java | 3 +- .../ns4kafka/services/TopicService.java | 42 +++++++++- .../clients/schema/SchemaRegistryClient.java | 54 +++++++++++++ .../clients/schema/entities/TagInfo.java | 4 + .../clients/schema/entities/TagSpecs.java | 11 +++ .../clients/schema/entities/TagTopicInfo.java | 7 ++ .../executors/TopicAsyncExecutor.java | 77 ++++++++++++++++++- .../ns4kafka/utils/tags/TagsUtils.java | 5 ++ 9 files changed, 208 insertions(+), 4 deletions(-) create mode 100644 src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java create mode 100644 src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java create mode 100644 src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java create mode 100644 src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java index a2b443d2..1611b0bc 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java @@ -88,6 +88,15 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, @Qu validationErrors.addAll(topicService.validateTopicUpdate(ns, existingTopic.get(), topic)); } + // validate on new tags only, not on deletion + if(topic.getMetadata().getTags() == null) { + topic.getMetadata().setTags(Collections.emptyList()); + } + List existingTags = existingTopic.isPresent() && existingTopic.get().getMetadata().getTags() != null ? 
existingTopic.get().getMetadata().getTags() : Collections.emptyList(); + if(topic.getMetadata().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { + validationErrors.addAll(topicService.validateTags(ns, topic)); + } + if (!validationErrors.isEmpty()) { throw new ResourceValidationException(validationErrors, topic.getKind(), topic.getMetadata().getName()); } diff --git a/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java b/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java index e1853e54..b5267158 100644 --- a/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java +++ b/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java @@ -8,6 +8,7 @@ import javax.validation.constraints.Pattern; import java.util.Date; import java.util.Map; +import java.util.List; @Data @Builder @@ -26,5 +27,5 @@ public class ObjectMeta { @EqualsAndHashCode.Exclude @JsonFormat(shape = JsonFormat.Shape.STRING) private Date creationTimestamp; - + private List tags; } diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index 0a438288..a3538d75 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -5,6 +5,8 @@ import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.repositories.TopicRepository; +import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; +import com.michelin.ns4kafka.services.clients.schema.entities.TagInfo; import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; import io.micronaut.context.ApplicationContext; import io.micronaut.inject.qualifiers.Qualifiers; @@ -34,6 +36,9 @@ public class TopicService { @Inject List kafkaAsyncExecutorConfig; + @Inject + SchemaRegistryClient schemaRegistryClient; + /** * Find all topics * @return The list of topics @@ -170,7 +175,6 @@ 
public List validateTopicUpdate(Namespace namespace, Topic existingTopic validationErrors.add(String.format("Invalid value %s for configuration cleanup.policy: Altering topic configuration from `delete` to `compact` is not currently supported. Please create a new topic with `compact` policy specified instead.", newTopic.getSpec().getConfigs().get(CLEANUP_POLICY_CONFIG))); } - return validationErrors; } @@ -284,4 +288,40 @@ public Map deleteRecords(Topic topic, Map validateTags(Namespace namespace, Topic topic) { + List validationErrors = new ArrayList<>(); + + + Optional topicCluster = kafkaAsyncExecutorConfig + .stream() + .filter(cluster -> namespace.getMetadata().getCluster().equals(cluster.getName())) + .findFirst(); + + if(topicCluster.isPresent() && !topicCluster.get().getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD)) { + validationErrors.add("Tags can only be used on confluent clusters."); + return validationErrors; + } + + Set tagNames = schemaRegistryClient.getTags(namespace.getMetadata().getCluster()) + .map(tags -> tags.stream().map(TagInfo::name).collect(Collectors.toSet())).block(); + + if(tagNames.isEmpty()) { + validationErrors.add(String.format("Invalid value %s for tags: No tags defined on the kafka cluster.", + String.join(" ", topic.getMetadata().getTags()))); + } + + if(!tagNames.containsAll(topic.getMetadata().getTags())) { + validationErrors.add(String.format("Invalid value (%s) for tags: Available tags are (%s).", + String.join(" ", topic.getMetadata().getTags()), String.join(" ", tagNames))); + } + + return validationErrors; + } } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java index c16a12e5..a656ce44 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java +++ 
b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java @@ -3,8 +3,10 @@ import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.services.clients.schema.entities.*; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; +import io.micronaut.core.type.Argument; import io.micronaut.core.util.StringUtils; import io.micronaut.http.HttpRequest; +import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.http.MutableHttpRequest; import io.micronaut.http.client.HttpClient; @@ -146,6 +148,58 @@ public Mono deleteCurrentCompatibilityBySubject(Str return Mono.from(httpClient.retrieve(request, SchemaCompatibilityResponse.class)); } + /** + * List tags + * @param kafkaCluster The Kafka cluster + * @return A list of tags + */ + public Mono> getTags(String kafkaCluster) { + KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster); + HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/types/tagdefs"))) + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); + return Mono.from(httpClient.retrieve(request, Argument.listOf(TagInfo.class))); + } + + /** + * List tags of a topic + * @param kafkaCluster The Kafka cluster + * @param entityName The topic's name for the API + * @return A list of tags + */ + public Mono> getTopicWithTags(String kafkaCluster, String entityName) { + KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster); + HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags"))) + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); + return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); + } + + /** + * Add a tag to a topic + * @param kafkaCluster The Kafka cluster + * @param tagSpecs 
Tags to add + * @return Information about added tags + */ + public Mono> addTags(String kafkaCluster, List tagSpecs) { + KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster); + HttpRequest request = HttpRequest.POST(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/tags")), tagSpecs) + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); + return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); + } + + /** + * Delete a tag from a topic + * @param kafkaCluster The Kafka cluster + * @param entityName The topic's name + * @param tagName The tag to delete + * @return The deletion response + */ + public Mono> deleteTag(String kafkaCluster, String entityName, String tagName) { + KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster); + HttpRequest request = HttpRequest.DELETE(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags/" + tagName))) + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); + return Mono.from(httpClient.exchange(request, Void.class)); + } + /** * Get the schema registry of the given Kafka cluster * @param kafkaCluster The Kafka cluster diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java new file mode 100644 index 00000000..e821b9f5 --- /dev/null +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java @@ -0,0 +1,4 @@ +package com.michelin.ns4kafka.services.clients.schema.entities; + +public record TagInfo(String name) { +} \ No newline at end of file diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java new file mode 100644 index 00000000..4c854f8d --- /dev/null +++
b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java @@ -0,0 +1,11 @@ +package com.michelin.ns4kafka.services.clients.schema.entities; + +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonInclude; +import lombok.Builder; + +import java.util.Map; + +@Builder +public record TagSpecs(String entityName, String entityType, String typeName) { +} diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java new file mode 100644 index 00000000..c3a9e393 --- /dev/null +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java @@ -0,0 +1,7 @@ +package com.michelin.ns4kafka.services.clients.schema.entities; + +import lombok.Builder; + +@Builder +public record TagTopicInfo(String entityName, String entityType, String typeName, String entityStatus) { +} diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index 64593d24..49e657a3 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -5,6 +5,9 @@ import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.repositories.TopicRepository; import com.michelin.ns4kafka.repositories.kafka.KafkaStoreException; +import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; +import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; +import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; import io.micronaut.context.annotation.EachBean; import jakarta.inject.Inject; import jakarta.inject.Singleton; @@ -22,6 +25,8 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static 
com.michelin.ns4kafka.utils.tags.TagsUtils.TOPIC_ENTITY_TYPE; + @Slf4j @EachBean(KafkaAsyncExecutorConfig.class) @Singleton @@ -31,6 +36,9 @@ public class TopicAsyncExecutor { @Inject TopicRepository topicRepository; + @Inject + SchemaRegistryClient schemaRegistryClient; + public TopicAsyncExecutor(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { this.kafkaAsyncExecutorConfig = kafkaAsyncExecutorConfig; } @@ -95,6 +103,10 @@ public void synchronizeTopics() { createTopics(toCreate); alterTopics(toUpdate, toCheckConf); + + createTags(ns4kafkaTopics, brokerTopics); + deleteTags(ns4kafkaTopics, brokerTopics); + } catch (ExecutionException | TimeoutException | CancellationException | KafkaStoreException e) { log.error("Error", e); } catch (InterruptedException e) { @@ -103,6 +115,54 @@ public void synchronizeTopics() { } } + /** + * Create tags + * @param ns4kafkaTopics Topics from ns4kafka + * @param brokerTopics Topics from broker + */ + public void createTags(List ns4kafkaTopics, Map brokerTopics) { + List tagsToCreate = ns4kafkaTopics.stream().flatMap(ns4kafkaTopic -> { + Topic brokerTopic = brokerTopics.get(ns4kafkaTopic.getMetadata().getName()); + + List existingTags = brokerTopic != null && brokerTopic.getMetadata().getTags() != null ? brokerTopic.getMetadata().getTags() : Collections.emptyList(); + List newTags = ns4kafkaTopic.getMetadata().getTags() != null ? 
ns4kafkaTopic.getMetadata().getTags() : Collections.emptyList(); + + return newTags.stream().filter(tag -> !existingTags.contains(tag)).map(tag -> TagSpecs.builder() + .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty("cluster.id")+":"+ns4kafkaTopic.getMetadata().getName()) + .typeName(tag) + .entityType(TOPIC_ENTITY_TYPE) + .build()); + }).toList(); + + if(!tagsToCreate.isEmpty()) { + schemaRegistryClient.addTags(kafkaAsyncExecutorConfig.getName(), tagsToCreate).block(); + } + } + + /** + * Delete tags + * @param ns4kafkaTopics Topics from ns4kafka + * @param brokerTopics Topics from broker + */ + public void deleteTags(List ns4kafkaTopics, Map brokerTopics) { + + List tagsToDelete = brokerTopics.values().stream().flatMap(brokerTopic -> { + Optional newTopic = ns4kafkaTopics.stream() + .filter(ns4kafkaTopic -> ns4kafkaTopic.getMetadata().getName().equals(brokerTopic.getMetadata().getName())) + .findFirst(); + List newTags = newTopic.isPresent() && newTopic.get().getMetadata().getTags() != null ? newTopic.get().getMetadata().getTags() : Collections.emptyList(); + List existingTags = brokerTopic.getMetadata().getTags() != null ? 
brokerTopic.getMetadata().getTags() : Collections.emptyList(); + + return existingTags.stream().filter(tag -> !newTags.contains(tag)).map(tag -> TagTopicInfo.builder() + .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty("cluster.id")+":"+brokerTopic.getMetadata().getName()) + .typeName(tag) + .entityType(TOPIC_ENTITY_TYPE) + .build()); + }).toList(); + + tagsToDelete.forEach(tag -> schemaRegistryClient.deleteTag(kafkaAsyncExecutorConfig.getName(), tag.entityName(), tag.typeName()).block()); + } + /** * Delete a topic * @param topic The topic to delete @@ -134,9 +194,11 @@ public List listBrokerTopicNames() throws InterruptedException, Executio public Map collectBrokerTopicsFromNames(List topicNames) throws InterruptedException, ExecutionException, TimeoutException { Map topicDescriptions = getAdminClient().describeTopics(topicNames).all().get(); + + // Create a Map> for all topics // includes only Dynamic config properties - return getAdminClient() + Map topics = getAdminClient() .describeConfigs(topicNames.stream() .map(s -> new ConfigResource(ConfigResource.Type.TOPIC, s)) .toList()) @@ -166,6 +228,17 @@ public Map collectBrokerTopicsFromNames(List topicNames) .build() ) .collect(Collectors.toMap( topic -> topic.getMetadata().getName(), Function.identity())); + + + if(kafkaAsyncExecutorConfig.getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD)) { + topics.entrySet().stream() + .forEach(entry -> + entry.getValue().getMetadata().setTags(schemaRegistryClient.getTopicWithTags(kafkaAsyncExecutorConfig.getName(), + kafkaAsyncExecutorConfig.getConfig().getProperty("cluster.id") + ":" + entry.getValue().getMetadata().getName()) + .block().stream().map(TagTopicInfo::typeName).toList())); + } + + return topics; } private void alterTopics(Map> toUpdate, List topics) { @@ -223,7 +296,7 @@ private void createTopics(List topics) { topicRepository.create(createdTopic); }); } - + private Collection computeConfigChanges(Map expected, Map 
actual){ List toCreate = expected.entrySet() .stream() diff --git a/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java b/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java new file mode 100644 index 00000000..d4338701 --- /dev/null +++ b/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java @@ -0,0 +1,5 @@ +package com.michelin.ns4kafka.utils.tags; + +public class TagsUtils { + public static final String TOPIC_ENTITY_TYPE = "kafka_topic"; +} From be69f49955a43e1c39a011db708e22fd3dfa0356 Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 27 Sep 2023 17:04:07 +0200 Subject: [PATCH 02/27] Manage Confluent tags --- .../com/michelin/ns4kafka/controllers/TopicControllerTest.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index 369ead59..53d7e064 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -22,6 +22,7 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; @@ -452,6 +453,7 @@ void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionE Topic existing = Topic.builder() .metadata(ObjectMeta.builder() .name("test.topic") + .tags(Collections.emptyList()) .namespace("test") .cluster("local") .build()) @@ -467,6 +469,7 @@ void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionE Topic topic = Topic.builder() .metadata(ObjectMeta.builder() .name("test.topic") + .tags(Collections.emptyList()) .build()) .spec(Topic.TopicSpec.builder() .replicationFactor(3) From a2c3efdb13439a1ce4e7d9a9bdc4e8d7411d0ae7 Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 27 Sep 2023 17:29:39 +0200 Subject: [PATCH 03/27] Manage 
Confluent tags --- .../controllers/topic/TopicController.java | 26 +++++++++++++------ .../ns4kafka/services/TopicService.java | 3 ++- .../clients/schema/entities/TagSpecs.java | 4 --- .../executors/TopicAsyncExecutor.java | 7 ++--- .../ns4kafka/utils/config/ClusterConfig.java | 9 +++++++ .../ns4kafka/utils/tags/TagsUtils.java | 5 +++- 6 files changed, 37 insertions(+), 17 deletions(-) create mode 100644 src/main/java/com/michelin/ns4kafka/utils/config/ClusterConfig.java diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java index 1611b0bc..5514162e 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java @@ -88,14 +88,7 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, @Qu validationErrors.addAll(topicService.validateTopicUpdate(ns, existingTopic.get(), topic)); } - // validate on new tags only, not on deletion - if(topic.getMetadata().getTags() == null) { - topic.getMetadata().setTags(Collections.emptyList()); - } - List existingTags = existingTopic.isPresent() && existingTopic.get().getMetadata().getTags() != null ? 
existingTopic.get().getMetadata().getTags() : Collections.emptyList(); - if(topic.getMetadata().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { - validationErrors.addAll(topicService.validateTags(ns, topic)); - } + validateTags(topic, existingTopic, validationErrors, ns); if (!validationErrors.isEmpty()) { throw new ResourceValidationException(validationErrors, topic.getKind(), topic.getMetadata().getName()); @@ -130,6 +123,23 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, @Qu return formatHttpResponse(topicService.create(topic), status); } + /** + * Validate on new tags only, not on deletion + * @param topic The topic to apply + * @param existingTopic The existing topic + * @param validationErrors A list of validation errors + * @param ns The namespace + */ + public void validateTags(Topic topic, Optional existingTopic, List validationErrors, Namespace ns) { + if(topic.getMetadata().getTags() == null) { + topic.getMetadata().setTags(Collections.emptyList()); + } + List existingTags = existingTopic.isPresent() && existingTopic.get().getMetadata().getTags() != null ? 
existingTopic.get().getMetadata().getTags() : Collections.emptyList(); + if(topic.getMetadata().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { + validationErrors.addAll(topicService.validateTags(ns, topic)); + } + } + /** * Delete a topic * @param namespace The namespace diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index a3538d75..804a12b8 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -312,9 +312,10 @@ public List validateTags(Namespace namespace, Topic topic) { Set tagNames = schemaRegistryClient.getTags(namespace.getMetadata().getCluster()) .map(tags -> tags.stream().map(TagInfo::name).collect(Collectors.toSet())).block(); - if(tagNames.isEmpty()) { + if(tagNames == null || tagNames.isEmpty()) { validationErrors.add(String.format("Invalid value %s for tags: No tags defined on the kafka cluster.", String.join(" ", topic.getMetadata().getTags()))); + return validationErrors; } if(!tagNames.containsAll(topic.getMetadata().getTags())) { diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java index 4c854f8d..a991741a 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java @@ -1,11 +1,7 @@ package com.michelin.ns4kafka.services.clients.schema.entities; -import com.fasterxml.jackson.annotation.JsonAnyGetter; -import com.fasterxml.jackson.annotation.JsonInclude; import lombok.Builder; -import java.util.Map; - @Builder public record TagSpecs(String entityName, String entityType, String typeName) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java 
b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index 49e657a3..6c9afdce 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -25,6 +25,7 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static com.michelin.ns4kafka.utils.config.ClusterConfig.CLUSTER_ID; import static com.michelin.ns4kafka.utils.tags.TagsUtils.TOPIC_ENTITY_TYPE; @Slf4j @@ -128,7 +129,7 @@ public void createTags(List ns4kafkaTopics, Map brokerTopi List newTags = ns4kafkaTopic.getMetadata().getTags() != null ? ns4kafkaTopic.getMetadata().getTags() : Collections.emptyList(); return newTags.stream().filter(tag -> !existingTags.contains(tag)).map(tag -> TagSpecs.builder() - .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty("cluster.id")+":"+ns4kafkaTopic.getMetadata().getName()) + .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID)+":"+ns4kafkaTopic.getMetadata().getName()) .typeName(tag) .entityType(TOPIC_ENTITY_TYPE) .build()); @@ -154,7 +155,7 @@ public void deleteTags(List ns4kafkaTopics, Map brokerTopi List existingTags = brokerTopic.getMetadata().getTags() != null ? 
brokerTopic.getMetadata().getTags() : Collections.emptyList(); return existingTags.stream().filter(tag -> !newTags.contains(tag)).map(tag -> TagTopicInfo.builder() - .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty("cluster.id")+":"+brokerTopic.getMetadata().getName()) + .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID)+":"+brokerTopic.getMetadata().getName()) .typeName(tag) .entityType(TOPIC_ENTITY_TYPE) .build()); @@ -234,7 +235,7 @@ public Map collectBrokerTopicsFromNames(List topicNames) topics.entrySet().stream() .forEach(entry -> entry.getValue().getMetadata().setTags(schemaRegistryClient.getTopicWithTags(kafkaAsyncExecutorConfig.getName(), - kafkaAsyncExecutorConfig.getConfig().getProperty("cluster.id") + ":" + entry.getValue().getMetadata().getName()) + kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID) + ":" + entry.getValue().getMetadata().getName()) .block().stream().map(TagTopicInfo::typeName).toList())); } diff --git a/src/main/java/com/michelin/ns4kafka/utils/config/ClusterConfig.java b/src/main/java/com/michelin/ns4kafka/utils/config/ClusterConfig.java new file mode 100644 index 00000000..d05e79fd --- /dev/null +++ b/src/main/java/com/michelin/ns4kafka/utils/config/ClusterConfig.java @@ -0,0 +1,9 @@ +package com.michelin.ns4kafka.utils.config; + +public final class ClusterConfig { + + public static final String CLUSTER_ID = "cluster.id"; + + private ClusterConfig() { + } +} diff --git a/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java b/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java index d4338701..a75e8f23 100644 --- a/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java +++ b/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java @@ -1,5 +1,8 @@ package com.michelin.ns4kafka.utils.tags; -public class TagsUtils { +public final class TagsUtils { public static final String TOPIC_ENTITY_TYPE = "kafka_topic"; + + private TagsUtils() { + } } From 
79b46c62b8fe4a08280ea39648d0800ad6f7c9de Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 27 Sep 2023 18:10:14 +0200 Subject: [PATCH 04/27] Manage Confluent tags --- .../ns4kafka/services/TopicService.java | 3 +- .../clients/schema/entities/TagInfo.java | 3 + .../controllers/TopicControllerTest.java | 6 +- .../ns4kafka/services/TopicServiceTest.java | 92 +++++++++++++++++++ 4 files changed, 98 insertions(+), 6 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index 804a12b8..9928105f 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -298,7 +298,6 @@ public Map deleteRecords(Topic topic, Map validateTags(Namespace namespace, Topic topic) { List validationErrors = new ArrayList<>(); - Optional topicCluster = kafkaAsyncExecutorConfig .stream() .filter(cluster -> namespace.getMetadata().getCluster().equals(cluster.getName())) @@ -313,7 +312,7 @@ public List validateTags(Namespace namespace, Topic topic) { .map(tags -> tags.stream().map(TagInfo::name).collect(Collectors.toSet())).block(); if(tagNames == null || tagNames.isEmpty()) { - validationErrors.add(String.format("Invalid value %s for tags: No tags defined on the kafka cluster.", + validationErrors.add(String.format("Invalid value (%s) for tags: No tags defined on the kafka cluster.", String.join(" ", topic.getMetadata().getTags()))); return validationErrors; } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java index e821b9f5..1391b78a 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java @@ -1,4 +1,7 @@ package 
com.michelin.ns4kafka.services.clients.schema.entities; +import lombok.Builder; + +@Builder public record TagInfo(String name) { } \ No newline at end of file diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index 53d7e064..6e549ba5 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -22,10 +22,7 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; +import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; @@ -252,6 +249,7 @@ void createNewTopic() throws InterruptedException, ExecutionException, TimeoutEx Topic topic = Topic.builder() .metadata(ObjectMeta.builder() .name("test.topic") + .tags(List.of("TAG_TEST")) .build()) .spec(Topic.TopicSpec.builder() .replicationFactor(3) diff --git a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index 3c5e847b..694ce385 100644 --- a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -7,6 +7,8 @@ import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.repositories.TopicRepository; import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; +import com.michelin.ns4kafka.services.clients.schema.entities.TagInfo; import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; import io.micronaut.context.ApplicationContext; import io.micronaut.inject.qualifiers.Qualifiers; @@ -17,6 +19,7 @@ import org.mockito.Mock; import org.mockito.Mockito; import 
org.mockito.junit.jupiter.MockitoExtension; +import reactor.core.publisher.Mono; import java.util.*; import java.util.concurrent.ExecutionException; @@ -43,6 +46,9 @@ class TopicServiceTest { @Mock List kafkaAsyncExecutorConfigs; + @Mock + SchemaRegistryClient schemaRegistryClient; + /** * Validate find topic by name */ @@ -931,4 +937,90 @@ void findAll() { List topics = topicService.findAll(); assertEquals(4, topics.size()); } + + @Test + void validateTagsShouldWork() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); + + Topic topic = Topic.builder() + .metadata(ObjectMeta.builder().name("ns-topic1").tags(List.of("TAG_TEST")).build()) + .build(); + + List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); + + when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); + when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(tagInfo)); + + List validationErrors = topicService.validateTags(ns, topic); + assertEquals(0, validationErrors.size()); + } + + @Test + void validateTagsShouldReturnErrorBecauseOfNonConfluentBroker() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); + + Topic topic = Topic.builder() + .metadata(ObjectMeta.builder().name("ns-topic1").tags(List.of("TAG_TEST")).build()) + .build(); + + when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED))); + + List validationErrors = topicService.validateTags(ns, topic); + assertEquals(1, validationErrors.size()); + assertEquals("Tags can only be used on confluent clusters.", validationErrors.get(0)); + } + + @Test + void validateTagsShouldReturnErrorBecauseOfNoTagsDefinedInBroker() { + Namespace ns = Namespace.builder() + 
.metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); + + Topic topic = Topic.builder() + .metadata(ObjectMeta.builder().name("ns-topic1").tags(List.of("TAG_TEST")).build()) + .build(); + + when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); + when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(Collections.emptyList())); + + List validationErrors = topicService.validateTags(ns, topic); + assertEquals(1, validationErrors.size()); + assertEquals("Invalid value (TAG_TEST) for tags: No tags defined on the kafka cluster.", validationErrors.get(0)); + } + + @Test + void validateTagsShouldReturnErrorBecauseOfBadTagsDefined() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); + + Topic topic = Topic.builder() + .metadata(ObjectMeta.builder().name("ns-topic1").tags(List.of("BAD_TAG")).build()) + .build(); + + List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); + + when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); + when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(tagInfo)); + + List validationErrors = topicService.validateTags(ns, topic); + assertEquals(1, validationErrors.size()); + assertEquals("Invalid value (BAD_TAG) for tags: Available tags are (TAG_TEST).", validationErrors.get(0)); + } } From 5333e58cad61bbde153ff3e0e16019f957252a8e Mon Sep 17 00:00:00 2001 From: E046899 Date: Thu, 28 Sep 2023 10:12:01 +0200 Subject: [PATCH 05/27] Manage Confluent tags --- .../executors/TopicAsyncExecutor.java | 22 +- .../services/executors/HttpResponseMock.java | 30 +++ .../executors/TopicAsyncExecutorMock.java | 41 ++++ .../executors/TopicAsyncExecutorTest.java | 193 ++++++++++++++++++ 4 files 
changed, 278 insertions(+), 8 deletions(-) create mode 100644 src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java create mode 100644 src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorMock.java create mode 100644 src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index 6c9afdce..8b2cc565 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -196,7 +196,6 @@ public List listBrokerTopicNames() throws InterruptedException, Executio public Map collectBrokerTopicsFromNames(List topicNames) throws InterruptedException, ExecutionException, TimeoutException { Map topicDescriptions = getAdminClient().describeTopics(topicNames).all().get(); - // Create a Map> for all topics // includes only Dynamic config properties Map topics = getAdminClient() @@ -230,16 +229,23 @@ public Map collectBrokerTopicsFromNames(List topicNames) ) .collect(Collectors.toMap( topic -> topic.getMetadata().getName(), Function.identity())); + completeWithTags(topics); - if(kafkaAsyncExecutorConfig.getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD)) { - topics.entrySet().stream() + return topics; + } + + /** + * Complete topics with confluent tags + * @param topics Topics to complete + */ + public void completeWithTags(Map topics) { + if(kafkaAsyncExecutorConfig.getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD)) { + topics.entrySet().stream() .forEach(entry -> entry.getValue().getMetadata().setTags(schemaRegistryClient.getTopicWithTags(kafkaAsyncExecutorConfig.getName(), - kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID) + ":" + 
entry.getValue().getMetadata().getName()) - .block().stream().map(TagTopicInfo::typeName).toList())); - } - - return topics; + kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID) + ":" + entry.getValue().getMetadata().getName()) + .block().stream().map(TagTopicInfo::typeName).toList())); + } } private void alterTopics(Map> toUpdate, List topics) { diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java b/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java new file mode 100644 index 00000000..5dea72d5 --- /dev/null +++ b/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java @@ -0,0 +1,30 @@ +package com.michelin.ns4kafka.services.executors; + +import io.micronaut.core.convert.value.MutableConvertibleValues; +import io.micronaut.http.HttpHeaders; +import io.micronaut.http.HttpResponse; +import io.micronaut.http.HttpStatus; + +import java.util.Optional; + +public class HttpResponseMock implements HttpResponse { + @Override + public HttpStatus getStatus() { + return null; + } + + @Override + public HttpHeaders getHeaders() { + return null; + } + + @Override + public MutableConvertibleValues getAttributes() { + return null; + } + + @Override + public Optional getBody() { + return Optional.empty(); + } +} diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorMock.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorMock.java new file mode 100644 index 00000000..974115e4 --- /dev/null +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorMock.java @@ -0,0 +1,41 @@ +package com.michelin.ns4kafka.services.executors; + +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.models.Topic; +import com.michelin.ns4kafka.repositories.TopicRepository; +import com.michelin.ns4kafka.repositories.kafka.KafkaStoreException; 
+import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; +import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; +import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; +import io.micronaut.context.annotation.EachBean; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.admin.*; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.ConfigResource; + +import java.time.Instant; +import java.util.*; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static com.michelin.ns4kafka.utils.config.ClusterConfig.CLUSTER_ID; +import static com.michelin.ns4kafka.utils.tags.TagsUtils.TOPIC_ENTITY_TYPE; + +public class TopicAsyncExecutorMock extends TopicAsyncExecutor { + + public TopicAsyncExecutorMock(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { + super(kafkaAsyncExecutorConfig); + } + + public void setSchemaRegistryClient(SchemaRegistryClient schemaRegistryClient) { + this.schemaRegistryClient = schemaRegistryClient; + } + +} diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java new file mode 100644 index 00000000..64b0a25c --- /dev/null +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -0,0 +1,193 @@ +package com.michelin.ns4kafka.services.executors; + +import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.models.Topic; +import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; +import 
com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import reactor.core.publisher.Mono; + +import java.util.*; + +import static com.michelin.ns4kafka.utils.config.ClusterConfig.CLUSTER_ID; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.*; + +@ExtendWith(MockitoExtension.class) +public class TopicAsyncExecutorTest { + + private static final String CLUSTER_ID_TEST = "cluster_id_test"; + private static final String LOCAL_CLUSTER = "local"; + private static final String TOPIC_NAME = "topic"; + private static final String TAG1 = "TAG1"; + private static final String TAG2 = "TAG2"; + + @Mock + SchemaRegistryClient schemaRegistryClient; + + @Test + public void createTagsShouldAddTags() { + KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); + Properties properties = new Properties(); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); + kafkaAsyncExecutorConfig.setConfig(properties); + TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); + topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); + + when(schemaRegistryClient.addTags(anyString(), anyList())).thenReturn(Mono.just(new ArrayList<>())); + + List ns4kafkaTopics = new ArrayList<>(); + Topic ns4kafkaTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .tags(List.of(TAG1)) + .name(TOPIC_NAME).build()).build(); + ns4kafkaTopics.add(ns4kafkaTopic); + + Map brokerTopics = new HashMap<>(); + Topic brokerTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .tags(List.of(TAG2)) + .name(TOPIC_NAME).build()).build(); + brokerTopics.put(TOPIC_NAME, brokerTopic); + + 
topicAsyncExecutor.createTags(ns4kafkaTopics, brokerTopics); + + verify(schemaRegistryClient, times(1)).addTags(anyString(), anyList()); + } + + @Test + public void createTagsShouldNotAddTags() { + KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); + Properties properties = new Properties(); + properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + kafkaAsyncExecutorConfig.setConfig(properties); + TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); + topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); + + List ns4kafkaTopics = new ArrayList<>(); + Topic ns4kafkaTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .tags(List.of(TAG1)) + .name(TOPIC_NAME).build()).build(); + ns4kafkaTopics.add(ns4kafkaTopic); + + Map brokerTopics = new HashMap<>(); + Topic brokerTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .tags(List.of(TAG1)) + .name(TOPIC_NAME).build()).build(); + brokerTopics.put(TOPIC_NAME, brokerTopic); + + topicAsyncExecutor.createTags(ns4kafkaTopics, brokerTopics); + + verify(schemaRegistryClient, times(0)).addTags(anyString(), anyList()); + } + + @Test + public void deleteTagsShouldDeleteTags() { + KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); + Properties properties = new Properties(); + properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + kafkaAsyncExecutorConfig.setConfig(properties); + TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); + topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); + + when(schemaRegistryClient.deleteTag(anyString(),anyString(),anyString())).thenReturn(Mono.just(new HttpResponseMock())); + + List ns4kafkaTopics = new ArrayList<>(); + Topic ns4kafkaTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .tags(List.of(TAG2)) + .name(TOPIC_NAME).build()).build(); + ns4kafkaTopics.add(ns4kafkaTopic); + + Map 
brokerTopics = new HashMap<>(); + Topic brokerTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .tags(List.of(TAG1,TAG2)) + .name(TOPIC_NAME).build()).build(); + brokerTopics.put(TOPIC_NAME, brokerTopic); + + topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); + + verify(schemaRegistryClient, times(1)).deleteTag(anyString(),anyString(),anyString()); + } + + @Test + public void deleteTagsShouldNotDeleteTags() { + KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); + Properties properties = new Properties(); + properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + kafkaAsyncExecutorConfig.setConfig(properties); + TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); + topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); + + List ns4kafkaTopics = new ArrayList<>(); + Topic ns4kafkaTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .tags(List.of(TAG1)) + .name(TOPIC_NAME).build()).build(); + ns4kafkaTopics.add(ns4kafkaTopic); + + Map brokerTopics = new HashMap<>(); + Topic brokerTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .tags(List.of(TAG1)) + .name(TOPIC_NAME).build()).build(); + brokerTopics.put(TOPIC_NAME, brokerTopic); + + topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); + + verify(schemaRegistryClient, times(0)).deleteTag(anyString(),anyString(),anyString()); + } + + @Test + public void completeWithTagsShouldComplete() { + KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER, KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD); + Properties properties = new Properties(); + properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + kafkaAsyncExecutorConfig.setConfig(properties); + TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); + topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); + + TagTopicInfo tagTopicInfo = 
TagTopicInfo.builder().typeName(TAG1).build(); + + when(schemaRegistryClient.getTopicWithTags(anyString(),anyString())).thenReturn(Mono.just(List.of(tagTopicInfo))); + + Map brokerTopics = new HashMap<>(); + Topic brokerTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .name(TOPIC_NAME).build()).build(); + brokerTopics.put(TOPIC_NAME, brokerTopic); + + topicAsyncExecutor.completeWithTags(brokerTopics); + + assertEquals(TAG1,brokerTopics.get(TOPIC_NAME).getMetadata().getTags().get(0)); + } + + @Test + public void completeWithTagsShouldNotComplete() { + KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER, KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED); + Properties properties = new Properties(); + properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + kafkaAsyncExecutorConfig.setConfig(properties); + TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); + topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); + + Map brokerTopics = new HashMap<>(); + Topic brokerTopic = Topic.builder() + .metadata(ObjectMeta.builder() + .name(TOPIC_NAME).build()).build(); + brokerTopics.put(TOPIC_NAME, brokerTopic); + + topicAsyncExecutor.completeWithTags(brokerTopics); + + assertNull(brokerTopics.get(TOPIC_NAME).getMetadata().getTags()); + } +} From 435910d6ffc2b53344b07ca47007606ff56f6b87 Mon Sep 17 00:00:00 2001 From: E046899 Date: Thu, 28 Sep 2023 10:21:03 +0200 Subject: [PATCH 06/27] Manage Confluent tags --- .../services/executors/TopicAsyncExecutorTest.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index 64b0a25c..b45d0648 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ 
b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -20,7 +20,7 @@ import static org.mockito.Mockito.*; @ExtendWith(MockitoExtension.class) -public class TopicAsyncExecutorTest { +class TopicAsyncExecutorTest { private static final String CLUSTER_ID_TEST = "cluster_id_test"; private static final String LOCAL_CLUSTER = "local"; @@ -32,7 +32,7 @@ public class TopicAsyncExecutorTest { SchemaRegistryClient schemaRegistryClient; @Test - public void createTagsShouldAddTags() { + void createTagsShouldAddTags() { KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); @@ -62,7 +62,7 @@ public void createTagsShouldAddTags() { } @Test - public void createTagsShouldNotAddTags() { + void createTagsShouldNotAddTags() { KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); @@ -90,7 +90,7 @@ public void createTagsShouldNotAddTags() { } @Test - public void deleteTagsShouldDeleteTags() { + void deleteTagsShouldDeleteTags() { KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); @@ -120,7 +120,7 @@ public void deleteTagsShouldDeleteTags() { } @Test - public void deleteTagsShouldNotDeleteTags() { + void deleteTagsShouldNotDeleteTags() { KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); @@ -148,7 +148,7 @@ public void deleteTagsShouldNotDeleteTags() { } @Test - public void completeWithTagsShouldComplete() { + void completeWithTagsShouldComplete() { KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new 
KafkaAsyncExecutorConfig(LOCAL_CLUSTER, KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); @@ -172,7 +172,7 @@ public void completeWithTagsShouldComplete() { } @Test - public void completeWithTagsShouldNotComplete() { + void completeWithTagsShouldNotComplete() { KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER, KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); From 521dc9a0e434a634d9bd494c77259be5b8ff47e4 Mon Sep 17 00:00:00 2001 From: E046899 Date: Thu, 28 Sep 2023 10:34:25 +0200 Subject: [PATCH 07/27] Manage Confluent tags --- README.md | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 6b1e2a8c..92f8eefe 100644 --- a/README.md +++ b/README.md @@ -14,22 +14,24 @@ Ns4Kafka introduces namespace functionality to Apache Kafka, as well as a new de ## Table of Contents -* [Principles](#principles) - * [Namespace Isolation](#namespace-isolation) - * [Desired State](#desired-state) - * [Server Side Validation](#server-side-validation) - * [CLI](#cli) -* [Download](#download) -* [Install](#install) -* [Demo Environment](#demo-environment) -* [Configuration](#configuration) - * [GitLab Authentication](#gitlab-authentication) - * [Admin Account](#admin-account) - * [Kafka Broker Authentication](#kafka-broker-authentication) - * [Managed clusters](#managed-clusters) - * [AKHQ](#akhq) -* [Administration](#administration) -* [Contribution](#contribution) +- [Ns4Kafka](#ns4kafka) + - [Table of Contents](#table-of-contents) + - [Principles](#principles) + - [Namespace Isolation](#namespace-isolation) + - [Desired State](#desired-state) + - [Server Side Validation](#server-side-validation) + - [CLI](#cli) + - [Download](#download) + - [Install](#install) + - [Demo 
Environment](#demo-environment) + - [Configuration](#configuration) + - [GitLab Authentication](#gitlab-authentication) + - [Admin Account](#admin-account) + - [Kafka Broker Authentication](#kafka-broker-authentication) + - [Managed clusters](#managed-clusters) + - [AKHQ](#akhq) + - [Administration](#administration) + - [Contribution](#contribution) ## Principles @@ -168,6 +170,7 @@ ns4kafka: sasl.mechanism: "PLAIN" security.protocol: "SASL_PLAINTEXT" sasl.jaas.config: "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"admin\" password=\"admin\";" + cluster.id: "lkc-abcde" schema-registry: url: "http://localhost:8081" basicAuthUsername: "user" @@ -190,6 +193,7 @@ The name for each managed cluster has to be unique. This is this name you have t | drop-unsync-acls | boolean | Should Ns4Kafka drop unsynchronized ACLs | | provider | boolean | The kind of cluster. Either SELF_MANAGED or CONFLUENT_CLOUD | | config.bootstrap.servers | string | The location of the clusters servers | +| config.cluster.id | string | The confluent cloud cluster id to manage tags | | schema-registry.url | string | The location of the Schema Registry | | schema-registry.basicAuthUsername | string | Basic authentication username to the Schema Registry | | schema-registry.basicAuthPassword | string | Basic authentication password to the Schema Registry | From a7577ea5fed010a6fcbd157b306d96827fe040f9 Mon Sep 17 00:00:00 2001 From: E046899 Date: Mon, 2 Oct 2023 18:10:29 +0200 Subject: [PATCH 08/27] Manage Confluent tags --- .../controllers/topic/TopicController.java | 22 +---- .../michelin/ns4kafka/models/ObjectMeta.java | 1 - .../com/michelin/ns4kafka/models/Topic.java | 5 + .../ns4kafka/services/TopicService.java | 8 +- .../executors/TopicAsyncExecutor.java | 18 ++-- .../controllers/TopicControllerTest.java | 6 +- .../ns4kafka/services/TopicServiceTest.java | 24 +++-- .../executors/TagSpecsArgumentMatcher.java | 25 +++++ .../executors/TopicAsyncExecutorMock.java | 41 
-------- .../executors/TopicAsyncExecutorTest.java | 94 +++++++++++-------- 10 files changed, 118 insertions(+), 126 deletions(-) create mode 100644 src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java delete mode 100644 src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorMock.java diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java index 5514162e..a4be9bea 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java @@ -88,7 +88,10 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, @Qu validationErrors.addAll(topicService.validateTopicUpdate(ns, existingTopic.get(), topic)); } - validateTags(topic, existingTopic, validationErrors, ns); + List existingTags = existingTopic.isPresent() && existingTopic.get().getSpec().getTags() != null ? 
existingTopic.get().getSpec().getTags() : Collections.emptyList(); + if(topic.getSpec().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { + validationErrors.addAll(topicService.validateTags(ns, topic)); + } if (!validationErrors.isEmpty()) { throw new ResourceValidationException(validationErrors, topic.getKind(), topic.getMetadata().getName()); @@ -123,23 +126,6 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, @Qu return formatHttpResponse(topicService.create(topic), status); } - /** - * Validate on new tags only, not on deletion - * @param topic The topic to apply - * @param existingTopic The existing topic - * @param validationErrors A list of validation errors - * @param ns The namespace - */ - public void validateTags(Topic topic, Optional existingTopic, List validationErrors, Namespace ns) { - if(topic.getMetadata().getTags() == null) { - topic.getMetadata().setTags(Collections.emptyList()); - } - List existingTags = existingTopic.isPresent() && existingTopic.get().getMetadata().getTags() != null ? 
existingTopic.get().getMetadata().getTags() : Collections.emptyList(); - if(topic.getMetadata().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { - validationErrors.addAll(topicService.validateTags(ns, topic)); - } - } - /** * Delete a topic * @param namespace The namespace diff --git a/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java b/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java index b5267158..1403e5b9 100644 --- a/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java +++ b/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java @@ -27,5 +27,4 @@ public class ObjectMeta { @EqualsAndHashCode.Exclude @JsonFormat(shape = JsonFormat.Shape.STRING) private Date creationTimestamp; - private List tags; } diff --git a/src/main/java/com/michelin/ns4kafka/models/Topic.java b/src/main/java/com/michelin/ns4kafka/models/Topic.java index 487916e5..938b580e 100644 --- a/src/main/java/com/michelin/ns4kafka/models/Topic.java +++ b/src/main/java/com/michelin/ns4kafka/models/Topic.java @@ -1,6 +1,8 @@ package com.michelin.ns4kafka.models; import com.fasterxml.jackson.annotation.JsonFormat; +import com.fasterxml.jackson.annotation.JsonSetter; +import com.fasterxml.jackson.annotation.Nulls; import io.micronaut.core.annotation.Introspected; import io.swagger.v3.oas.annotations.media.Schema; import lombok.*; @@ -9,6 +11,7 @@ import javax.validation.constraints.NotNull; import java.time.Instant; import java.util.Date; +import java.util.List; import java.util.Map; @Data @@ -37,6 +40,8 @@ public class Topic { public static class TopicSpec { private int replicationFactor; private int partitions; + @JsonSetter(nulls = Nulls.AS_EMPTY) + private List tags; private Map configs; } diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index 9928105f..943572a3 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ 
b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -304,7 +304,7 @@ public List validateTags(Namespace namespace, Topic topic) { .findFirst(); if(topicCluster.isPresent() && !topicCluster.get().getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD)) { - validationErrors.add("Tags can only be used on confluent clusters."); + validationErrors.add(String.format("Invalid value (%s) for tags: Tags are not currently supported.", String.join(",", topic.getSpec().getTags()))); return validationErrors; } @@ -313,13 +313,13 @@ public List validateTags(Namespace namespace, Topic topic) { if(tagNames == null || tagNames.isEmpty()) { validationErrors.add(String.format("Invalid value (%s) for tags: No tags defined on the kafka cluster.", - String.join(" ", topic.getMetadata().getTags()))); + String.join(",", topic.getSpec().getTags()))); return validationErrors; } - if(!tagNames.containsAll(topic.getMetadata().getTags())) { + if(!tagNames.containsAll(topic.getSpec().getTags())) { validationErrors.add(String.format("Invalid value (%s) for tags: Available tags are (%s).", - String.join(" ", topic.getMetadata().getTags()), String.join(" ", tagNames))); + String.join(",", topic.getSpec().getTags()), String.join(",", tagNames))); } return validationErrors; diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index 8b2cc565..7bae9fd5 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -11,6 +11,7 @@ import io.micronaut.context.annotation.EachBean; import jakarta.inject.Inject; import jakarta.inject.Singleton; +import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.*; import org.apache.kafka.common.TopicPartition; @@ -31,19 +32,14 @@ @Slf4j 
@EachBean(KafkaAsyncExecutorConfig.class) @Singleton +@AllArgsConstructor public class TopicAsyncExecutor { private final KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; - @Inject TopicRepository topicRepository; - @Inject SchemaRegistryClient schemaRegistryClient; - public TopicAsyncExecutor(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { - this.kafkaAsyncExecutorConfig = kafkaAsyncExecutorConfig; - } - private Admin getAdminClient(){ return kafkaAsyncExecutorConfig.getAdminClient(); } @@ -125,8 +121,8 @@ public void createTags(List ns4kafkaTopics, Map brokerTopi List tagsToCreate = ns4kafkaTopics.stream().flatMap(ns4kafkaTopic -> { Topic brokerTopic = brokerTopics.get(ns4kafkaTopic.getMetadata().getName()); - List existingTags = brokerTopic != null && brokerTopic.getMetadata().getTags() != null ? brokerTopic.getMetadata().getTags() : Collections.emptyList(); - List newTags = ns4kafkaTopic.getMetadata().getTags() != null ? ns4kafkaTopic.getMetadata().getTags() : Collections.emptyList(); + List existingTags = brokerTopic != null && brokerTopic.getSpec().getTags() != null ? brokerTopic.getSpec().getTags() : Collections.emptyList(); + List newTags = ns4kafkaTopic.getSpec().getTags() != null ? ns4kafkaTopic.getSpec().getTags() : Collections.emptyList(); return newTags.stream().filter(tag -> !existingTags.contains(tag)).map(tag -> TagSpecs.builder() .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID)+":"+ns4kafkaTopic.getMetadata().getName()) @@ -151,8 +147,8 @@ public void deleteTags(List ns4kafkaTopics, Map brokerTopi Optional newTopic = ns4kafkaTopics.stream() .filter(ns4kafkaTopic -> ns4kafkaTopic.getMetadata().getName().equals(brokerTopic.getMetadata().getName())) .findFirst(); - List newTags = newTopic.isPresent() && newTopic.get().getMetadata().getTags() != null ? newTopic.get().getMetadata().getTags() : Collections.emptyList(); - List existingTags = brokerTopic.getMetadata().getTags() != null ? 
brokerTopic.getMetadata().getTags() : Collections.emptyList(); + List newTags = newTopic.isPresent() && newTopic.get().getSpec().getTags() != null ? newTopic.get().getSpec().getTags() : Collections.emptyList(); + List existingTags = brokerTopic.getSpec().getTags() != null ? brokerTopic.getSpec().getTags() : Collections.emptyList(); return existingTags.stream().filter(tag -> !newTags.contains(tag)).map(tag -> TagTopicInfo.builder() .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID)+":"+brokerTopic.getMetadata().getName()) @@ -242,7 +238,7 @@ public void completeWithTags(Map topics) { if(kafkaAsyncExecutorConfig.getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD)) { topics.entrySet().stream() .forEach(entry -> - entry.getValue().getMetadata().setTags(schemaRegistryClient.getTopicWithTags(kafkaAsyncExecutorConfig.getName(), + entry.getValue().getSpec().setTags(schemaRegistryClient.getTopicWithTags(kafkaAsyncExecutorConfig.getName(), kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID) + ":" + entry.getValue().getMetadata().getName()) .block().stream().map(TagTopicInfo::typeName).toList())); } diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index 6e549ba5..5ebab2d3 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -249,11 +249,11 @@ void createNewTopic() throws InterruptedException, ExecutionException, TimeoutEx Topic topic = Topic.builder() .metadata(ObjectMeta.builder() .name("test.topic") - .tags(List.of("TAG_TEST")) .build()) .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(List.of("TAG_TEST")) .configs(Map.of("cleanup.policy","delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -451,13 +451,13 @@ void updateTopicAlreadyExistsUnchanged() 
throws InterruptedException, ExecutionE Topic existing = Topic.builder() .metadata(ObjectMeta.builder() .name("test.topic") - .tags(Collections.emptyList()) .namespace("test") .cluster("local") .build()) .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy","compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -467,11 +467,11 @@ void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionE Topic topic = Topic.builder() .metadata(ObjectMeta.builder() .name("test.topic") - .tags(Collections.emptyList()) .build()) .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy","compact", "min.insync.replicas", "2", "retention.ms", "60000")) diff --git a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index 694ce385..c4dbf756 100644 --- a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -939,7 +939,7 @@ void findAll() { } @Test - void validateTagsShouldWork() { + void shouldTagsBeValid() { Namespace ns = Namespace.builder() .metadata(ObjectMeta.builder() .name("namespace") @@ -948,7 +948,9 @@ void validateTagsShouldWork() { .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").tags(List.of("TAG_TEST")).build()) + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG_TEST")).build()) .build(); List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); @@ -961,7 +963,7 @@ void validateTagsShouldWork() { } @Test - void validateTagsShouldReturnErrorBecauseOfNonConfluentBroker() { + void shouldTagsBeInvalidWhenNotConfluentCloud() { Namespace ns = Namespace.builder() .metadata(ObjectMeta.builder() 
.name("namespace") @@ -970,7 +972,9 @@ void validateTagsShouldReturnErrorBecauseOfNonConfluentBroker() { .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").tags(List.of("TAG_TEST")).build()) + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG_TEST")).build()) .build(); when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED))); @@ -981,7 +985,7 @@ void validateTagsShouldReturnErrorBecauseOfNonConfluentBroker() { } @Test - void validateTagsShouldReturnErrorBecauseOfNoTagsDefinedInBroker() { + void shouldTagsBeInvalidWhenNoTagsAllowed() { Namespace ns = Namespace.builder() .metadata(ObjectMeta.builder() .name("namespace") @@ -990,7 +994,9 @@ void validateTagsShouldReturnErrorBecauseOfNoTagsDefinedInBroker() { .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").tags(List.of("TAG_TEST")).build()) + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG_TEST")).build()) .build(); when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); @@ -1002,7 +1008,7 @@ void validateTagsShouldReturnErrorBecauseOfNoTagsDefinedInBroker() { } @Test - void validateTagsShouldReturnErrorBecauseOfBadTagsDefined() { + void shouldTagsBeInvalidWhenNotAllowed() { Namespace ns = Namespace.builder() .metadata(ObjectMeta.builder() .name("namespace") @@ -1011,7 +1017,9 @@ void validateTagsShouldReturnErrorBecauseOfBadTagsDefined() { .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").tags(List.of("BAD_TAG")).build()) + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG_TEST")).build()) .build(); List tagInfo 
= List.of(TagInfo.builder().name("TAG_TEST").build()); diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java b/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java new file mode 100644 index 00000000..1260b319 --- /dev/null +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java @@ -0,0 +1,25 @@ +package com.michelin.ns4kafka.services.executors; + +import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; +import org.mockito.ArgumentMatcher; + +import java.util.List; + +public class TagSpecsArgumentMatcher implements ArgumentMatcher> { + + private List left; + + public TagSpecsArgumentMatcher(List tagSpecsList) { + this.left = tagSpecsList; + } + + @Override + public boolean matches(List right) { + if(left.size() != right.size()) { + return false; + } + return left.get(0).entityName().equals(right.get(0).entityName()) && + left.get(0).entityType().equals(right.get(0).entityType()) && + left.get(0).typeName().equals(right.get(0).typeName()); + } +} diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorMock.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorMock.java deleted file mode 100644 index 974115e4..00000000 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorMock.java +++ /dev/null @@ -1,41 +0,0 @@ -package com.michelin.ns4kafka.services.executors; - -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.models.ObjectMeta; -import com.michelin.ns4kafka.models.Topic; -import com.michelin.ns4kafka.repositories.TopicRepository; -import com.michelin.ns4kafka.repositories.kafka.KafkaStoreException; -import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; -import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; -import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; 
-import io.micronaut.context.annotation.EachBean; -import jakarta.inject.Inject; -import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.clients.admin.*; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.config.ConfigResource; - -import java.time.Instant; -import java.util.*; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.function.Function; -import java.util.stream.Collectors; - -import static com.michelin.ns4kafka.utils.config.ClusterConfig.CLUSTER_ID; -import static com.michelin.ns4kafka.utils.tags.TagsUtils.TOPIC_ENTITY_TYPE; - -public class TopicAsyncExecutorMock extends TopicAsyncExecutor { - - public TopicAsyncExecutorMock(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { - super(kafkaAsyncExecutorConfig); - } - - public void setSchemaRegistryClient(SchemaRegistryClient schemaRegistryClient) { - this.schemaRegistryClient = schemaRegistryClient; - } - -} diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index b45d0648..956647e0 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -4,16 +4,20 @@ import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; +import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; import org.mockito.Mock; import 
org.mockito.junit.jupiter.MockitoExtension; +import org.testcontainers.shaded.org.hamcrest.Matchers; import reactor.core.publisher.Mono; import java.util.*; import static com.michelin.ns4kafka.utils.config.ClusterConfig.CLUSTER_ID; +import static com.michelin.ns4kafka.utils.tags.TagsUtils.TOPIC_ENTITY_TYPE; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.mockito.ArgumentMatchers.anyList; @@ -31,57 +35,66 @@ class TopicAsyncExecutorTest { @Mock SchemaRegistryClient schemaRegistryClient; + @Mock + KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; + + @InjectMocks + TopicAsyncExecutor topicAsyncExecutor; + @Test void createTagsShouldAddTags() { - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); kafkaAsyncExecutorConfig.setConfig(properties); - TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); - topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); when(schemaRegistryClient.addTags(anyString(), anyList())).thenReturn(Mono.just(new ArrayList<>())); + when(kafkaAsyncExecutorConfig.getConfig()).thenReturn(properties); + when(kafkaAsyncExecutorConfig.getName()).thenReturn(LOCAL_CLUSTER); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() .metadata(ObjectMeta.builder() - .tags(List.of(TAG1)) - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG1)).build()).build(); ns4kafkaTopics.add(ns4kafkaTopic); Map brokerTopics = new HashMap<>(); Topic brokerTopic = Topic.builder() .metadata(ObjectMeta.builder() - .tags(List.of(TAG2)) - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG2)).build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); 
topicAsyncExecutor.createTags(ns4kafkaTopics, brokerTopics); - verify(schemaRegistryClient, times(1)).addTags(anyString(), anyList()); + List tagSpecsList = new ArrayList<>(); + TagSpecs tagSpecs = TagSpecs.builder().typeName(TAG1).entityName(CLUSTER_ID_TEST+":"+TOPIC_NAME).entityType(TOPIC_ENTITY_TYPE).build(); + tagSpecsList.add(tagSpecs); + verify(schemaRegistryClient, times(1)).addTags(eq(LOCAL_CLUSTER), argThat(new TagSpecsArgumentMatcher(tagSpecsList))); } @Test void createTagsShouldNotAddTags() { - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); kafkaAsyncExecutorConfig.setConfig(properties); - TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); - topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() .metadata(ObjectMeta.builder() - .tags(List.of(TAG1)) - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG1)).build()).build(); ns4kafkaTopics.add(ns4kafkaTopic); Map brokerTopics = new HashMap<>(); Topic brokerTopic = Topic.builder() .metadata(ObjectMeta.builder() - .tags(List.of(TAG1)) - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG1)).build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); topicAsyncExecutor.createTags(ns4kafkaTopics, brokerTopics); @@ -91,55 +104,55 @@ void createTagsShouldNotAddTags() { @Test void deleteTagsShouldDeleteTags() { - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); kafkaAsyncExecutorConfig.setConfig(properties); - TopicAsyncExecutorMock topicAsyncExecutor = new 
TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); - topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); when(schemaRegistryClient.deleteTag(anyString(),anyString(),anyString())).thenReturn(Mono.just(new HttpResponseMock())); + when(kafkaAsyncExecutorConfig.getConfig()).thenReturn(properties); + when(kafkaAsyncExecutorConfig.getName()).thenReturn(LOCAL_CLUSTER); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() .metadata(ObjectMeta.builder() - .tags(List.of(TAG2)) - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG2)).build()).build(); ns4kafkaTopics.add(ns4kafkaTopic); Map brokerTopics = new HashMap<>(); Topic brokerTopic = Topic.builder() .metadata(ObjectMeta.builder() - .tags(List.of(TAG1,TAG2)) - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG1,TAG2)).build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); - verify(schemaRegistryClient, times(1)).deleteTag(anyString(),anyString(),anyString()); + verify(schemaRegistryClient, times(1)).deleteTag(eq(LOCAL_CLUSTER),eq(CLUSTER_ID_TEST+":"+TOPIC_NAME),eq(TAG1)); } @Test void deleteTagsShouldNotDeleteTags() { - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); kafkaAsyncExecutorConfig.setConfig(properties); - TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); - topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() .metadata(ObjectMeta.builder() - .tags(List.of(TAG1)) - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG1)).build()).build(); 
ns4kafkaTopics.add(ns4kafkaTopic); Map brokerTopics = new HashMap<>(); Topic brokerTopic = Topic.builder() .metadata(ObjectMeta.builder() - .tags(List.of(TAG1)) - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG1)).build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); @@ -149,45 +162,46 @@ void deleteTagsShouldNotDeleteTags() { @Test void completeWithTagsShouldComplete() { - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new KafkaAsyncExecutorConfig(LOCAL_CLUSTER, KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); kafkaAsyncExecutorConfig.setConfig(properties); - TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); - topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); TagTopicInfo tagTopicInfo = TagTopicInfo.builder().typeName(TAG1).build(); when(schemaRegistryClient.getTopicWithTags(anyString(),anyString())).thenReturn(Mono.just(List.of(tagTopicInfo))); + when(kafkaAsyncExecutorConfig.getConfig()).thenReturn(properties); + when(kafkaAsyncExecutorConfig.getName()).thenReturn(LOCAL_CLUSTER); + when(kafkaAsyncExecutorConfig.getProvider()).thenReturn(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD); Map brokerTopics = new HashMap<>(); Topic brokerTopic = Topic.builder() .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder().build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); topicAsyncExecutor.completeWithTags(brokerTopics); - assertEquals(TAG1,brokerTopics.get(TOPIC_NAME).getMetadata().getTags().get(0)); + assertEquals(TAG1,brokerTopics.get(TOPIC_NAME).getSpec().getTags().get(0)); } @Test void completeWithTagsShouldNotComplete() { - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig = new 
KafkaAsyncExecutorConfig(LOCAL_CLUSTER, KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED); Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); kafkaAsyncExecutorConfig.setConfig(properties); - TopicAsyncExecutorMock topicAsyncExecutor = new TopicAsyncExecutorMock(kafkaAsyncExecutorConfig); - topicAsyncExecutor.setSchemaRegistryClient(schemaRegistryClient); + + when(kafkaAsyncExecutorConfig.getProvider()).thenReturn(KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED); Map brokerTopics = new HashMap<>(); Topic brokerTopic = Topic.builder() .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()).build(); + .name(TOPIC_NAME).build()) + .spec(Topic.TopicSpec.builder().build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); topicAsyncExecutor.completeWithTags(brokerTopics); - assertNull(brokerTopics.get(TOPIC_NAME).getMetadata().getTags()); + assertNull(brokerTopics.get(TOPIC_NAME).getSpec().getTags()); } } From 2888436059575192579a61183e4a6f8a5a0aacf9 Mon Sep 17 00:00:00 2001 From: E046899 Date: Tue, 3 Oct 2023 09:32:01 +0200 Subject: [PATCH 09/27] Manage Confluent tags --- .../controllers/TopicControllerTest.java | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index 5ebab2d3..e40400d7 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -378,6 +378,130 @@ void updateTopic() throws InterruptedException, ExecutionException, TimeoutExcep assertEquals("test.topic", actual.getMetadata().getName()); } + /** + * Validate topic update with two new tags + * @throws InterruptedException Any interrupted exception + * @throws ExecutionException Any execution exception + * @throws TimeoutException Any timeout exception + */ + @Test + void 
updateTopicWithNewTags() throws InterruptedException, ExecutionException, TimeoutException { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); + + Topic existing = Topic.builder() + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy","compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); + + Topic topic = Topic.builder() + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .tags(Arrays.asList("TAG1", "TAG2")) + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy","delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); + + when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.of(existing)); + when(topicService.create(topic)).thenReturn(topic); + when(securityService.username()).thenReturn(Optional.of("test-user")); + when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); + doNothing().when(applicationEventPublisher).publishEvent(any()); + + var response = topicController.apply("test", topic, false); + Topic actual = response.body(); + assertEquals("changed", response.header("X-Ns4kafka-Result")); + assertEquals("test.topic", actual.getMetadata().getName()); + assertEquals(2, actual.getSpec().getTags().size()); + assertEquals("TAG1", actual.getSpec().getTags().get(0)); + assertEquals("TAG2", actual.getSpec().getTags().get(1)); + } + + /** + * Validate topic update with a tag to delete + * @throws InterruptedException Any interrupted exception + * @throws ExecutionException Any execution exception + * @throws 
TimeoutException Any timeout exception + */ + @Test + void updateTopicWithTagToDelete() throws InterruptedException, ExecutionException, TimeoutException { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); + + Topic existing = Topic.builder() + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .tags(Arrays.asList("TAG1", "TAG2")) + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy","compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); + + Topic topic = Topic.builder() + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG1")) + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy","delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); + + when(namespaceService.findByName("test")) + .thenReturn(Optional.of(ns)); + when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.of(existing)); + when(topicService.create(topic)).thenReturn(topic); + when(securityService.username()).thenReturn(Optional.of("test-user")); + when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); + doNothing().when(applicationEventPublisher).publishEvent(any()); + + var response = topicController.apply("test", topic, false); + Topic actual = response.body(); + assertEquals("changed", response.header("X-Ns4kafka-Result")); + assertEquals("test.topic", actual.getMetadata().getName()); + assertEquals(1, actual.getSpec().getTags().size()); + assertEquals("TAG1", actual.getSpec().getTags().get(0)); + } + /** * Validate topic update when there are validations errors */ From f2a2a3009bd68a9ef31e088d5cf3ad8f72a2cc90 Mon Sep 17 00:00:00 2001 From: E046899 Date: 
Tue, 3 Oct 2023 12:15:45 +0200 Subject: [PATCH 10/27] Manage Confluent tags --- .../controllers/topic/TopicController.java | 6 +---- .../ns4kafka/services/TopicService.java | 11 ++++----- .../clients/schema/SchemaRegistryClient.java | 14 ++++------- .../executors/TopicAsyncExecutor.java | 23 ++++++++----------- .../ns4kafka/services/TopicServiceTest.java | 9 ++++---- 5 files changed, 24 insertions(+), 39 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java index 1fcc3705..89143b5a 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java @@ -21,11 +21,7 @@ import jakarta.inject.Inject; import jakarta.validation.Valid; import java.time.Instant; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.Optional; +import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index 55d48333..524a5927 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -16,11 +16,8 @@ import io.micronaut.inject.qualifiers.Qualifiers; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; + +import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; @@ -333,12 +330,12 @@ public Map deleteRecords(Topic topic, Map validateTags(Namespace namespace, 
Topic topic) { List validationErrors = new ArrayList<>(); - Optional topicCluster = kafkaAsyncExecutorConfig + Optional topicCluster = managedClusterProperties .stream() .filter(cluster -> namespace.getMetadata().getCluster().equals(cluster.getName())) .findFirst(); - if(topicCluster.isPresent() && !topicCluster.get().getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD)) { + if(topicCluster.isPresent() && !topicCluster.get().getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { validationErrors.add(String.format("Invalid value (%s) for tags: Tags are not currently supported.", String.join(",", topic.getSpec().getTags()))); return validationErrors; } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java index b4aa05b7..5685c7e0 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java @@ -1,11 +1,7 @@ package com.michelin.ns4kafka.services.clients.schema; import com.michelin.ns4kafka.properties.ManagedClusterProperties; -import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityCheckResponse; -import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityRequest; -import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityResponse; -import com.michelin.ns4kafka.services.clients.schema.entities.SchemaRequest; -import com.michelin.ns4kafka.services.clients.schema.entities.SchemaResponse; +import com.michelin.ns4kafka.services.clients.schema.entities.*; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.core.type.Argument; import io.micronaut.core.util.StringUtils; @@ -179,7 +175,7 @@ public Mono deleteCurrentCompatibilityBySubject(Str * @return A list 
of tags */ public Mono> getTags(String kafkaCluster) { - KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster); + ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/types/tagdefs"))) .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagInfo.class))); @@ -192,7 +188,7 @@ public Mono> getTags(String kafkaCluster) { * @return A list of tags */ public Mono> getTopicWithTags(String kafkaCluster, String entityName) { - KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster); + ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags"))) .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); @@ -205,7 +201,7 @@ public Mono> getTopicWithTags(String kafkaCluster, String ent * @return Information about added tags */ public Mono> addTags(String kafkaCluster, List tagSpecs) { - KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster); + ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); HttpRequest request = HttpRequest.POST(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/tags")), tagSpecs) .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); @@ -219,7 +215,7 @@ public Mono> addTags(String kafkaCluster, List tagS * @return The resume response */ public Mono> deleteTag(String kafkaCluster, String entityName, String tagName) 
{ - KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster); + ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); HttpRequest request = HttpRequest.DELETE(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags/" + tagName))) .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.exchange(request, Void.class)); diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index 8730d456..a867c16c 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -17,12 +17,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigResource; import java.time.Instant; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.Objects; +import java.util.*; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -56,8 +51,10 @@ public class TopicAsyncExecutor { private final ManagedClusterProperties managedClusterProperties; + @Inject TopicRepository topicRepository; + @Inject SchemaRegistryClient schemaRegistryClient; public TopicAsyncExecutor(ManagedClusterProperties managedClusterProperties) { @@ -156,14 +153,14 @@ public void createTags(List ns4kafkaTopics, Map brokerTopi List newTags = ns4kafkaTopic.getSpec().getTags() != null ? 
ns4kafkaTopic.getSpec().getTags() : Collections.emptyList(); return newTags.stream().filter(tag -> !existingTags.contains(tag)).map(tag -> TagSpecs.builder() - .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID)+":"+ns4kafkaTopic.getMetadata().getName()) + .entityName(managedClusterProperties.getConfig().getProperty(CLUSTER_ID)+":"+ns4kafkaTopic.getMetadata().getName()) .typeName(tag) .entityType(TOPIC_ENTITY_TYPE) .build()); }).toList(); if(!tagsToCreate.isEmpty()) { - schemaRegistryClient.addTags(kafkaAsyncExecutorConfig.getName(), tagsToCreate).block(); + schemaRegistryClient.addTags(managedClusterProperties.getName(), tagsToCreate).block(); } } @@ -182,13 +179,13 @@ public void deleteTags(List ns4kafkaTopics, Map brokerTopi List existingTags = brokerTopic.getSpec().getTags() != null ? brokerTopic.getSpec().getTags() : Collections.emptyList(); return existingTags.stream().filter(tag -> !newTags.contains(tag)).map(tag -> TagTopicInfo.builder() - .entityName(kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID)+":"+brokerTopic.getMetadata().getName()) + .entityName(managedClusterProperties.getConfig().getProperty(CLUSTER_ID)+":"+brokerTopic.getMetadata().getName()) .typeName(tag) .entityType(TOPIC_ENTITY_TYPE) .build()); }).toList(); - tagsToDelete.forEach(tag -> schemaRegistryClient.deleteTag(kafkaAsyncExecutorConfig.getName(), tag.entityName(), tag.typeName()).block()); + tagsToDelete.forEach(tag -> schemaRegistryClient.deleteTag(managedClusterProperties.getName(), tag.entityName(), tag.typeName()).block()); } /** @@ -228,11 +225,11 @@ public List listBrokerTopicNames() throws InterruptedException, Executio * @param topics Topics to complete */ public void completeWithTags(Map topics) { - if(kafkaAsyncExecutorConfig.getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD)) { + if(managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { topics.entrySet().stream() 
.forEach(entry -> - entry.getValue().getSpec().setTags(schemaRegistryClient.getTopicWithTags(kafkaAsyncExecutorConfig.getName(), - kafkaAsyncExecutorConfig.getConfig().getProperty(CLUSTER_ID) + ":" + entry.getValue().getMetadata().getName()) + entry.getValue().getSpec().setTags(schemaRegistryClient.getTopicWithTags(managedClusterProperties.getName(), + managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + entry.getValue().getMetadata().getName()) .block().stream().map(TagTopicInfo::typeName).toList())); } } diff --git a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index 8d1bec45..a0168da3 100644 --- a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -13,7 +13,6 @@ import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.repositories.TopicRepository; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; import com.michelin.ns4kafka.services.clients.schema.entities.TagInfo; import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; @@ -902,7 +901,7 @@ void shouldTagsBeValid() { List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); + when(managedClusterProperties.stream()).thenReturn(Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(tagInfo)); List validationErrors = topicService.validateTags(ns, topic); @@ -924,7 +923,7 @@ void shouldTagsBeInvalidWhenNotConfluentCloud() { .tags(List.of("TAG_TEST")).build()) 
.build(); - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED))); + when(managedClusterProperties.stream()).thenReturn(Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.SELF_MANAGED))); List validationErrors = topicService.validateTags(ns, topic); assertEquals(1, validationErrors.size()); @@ -946,7 +945,7 @@ void shouldTagsBeInvalidWhenNoTagsAllowed() { .tags(List.of("TAG_TEST")).build()) .build(); - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); + when(managedClusterProperties.stream()).thenReturn(Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(Collections.emptyList())); List validationErrors = topicService.validateTags(ns, topic); @@ -971,7 +970,7 @@ void shouldTagsBeInvalidWhenNotAllowed() { List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); + when(managedClusterProperties.stream()).thenReturn(Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(tagInfo)); List validationErrors = topicService.validateTags(ns, topic); From 2767d2e9d2c302dbfdd91eb29e73c2d9a91cd4c3 Mon Sep 17 00:00:00 2001 From: E046899 Date: Tue, 3 Oct 2023 13:02:43 +0200 Subject: [PATCH 11/27] Manage Confluent tags --- .../ns4kafka/services/TopicService.java | 3 +- .../clients/schema/entities/TagSpecs.java | 6 + .../clients/schema/entities/TagTopicInfo.java | 6 + .../executors/TopicAsyncExecutor.java | 125 ++++++++++-------- 
.../ns4kafka/utils/config/ClusterConfig.java | 9 -- .../ns4kafka/utils/tags/TagsUtils.java | 8 -- .../services/executors/HttpResponseMock.java | 10 ++ .../executors/TopicAsyncExecutorTest.java | 41 +++--- 8 files changed, 110 insertions(+), 98 deletions(-) delete mode 100644 src/main/java/com/michelin/ns4kafka/utils/config/ClusterConfig.java delete mode 100644 src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index 524a5927..e7a30b61 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -344,8 +344,7 @@ public List validateTags(Namespace namespace, Topic topic) { .map(tags -> tags.stream().map(TagInfo::name).collect(Collectors.toSet())).block(); if(tagNames == null || tagNames.isEmpty()) { - validationErrors.add(String.format("Invalid value (%s) for tags: No tags defined on the kafka cluster.", - String.join(",", topic.getSpec().getTags()))); + validationErrors.add(String.format("Invalid value (%s) for tags: No tags allowed.", String.join(",", topic.getSpec().getTags()))); return validationErrors; } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java index a991741a..865fd961 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java @@ -4,4 +4,10 @@ @Builder public record TagSpecs(String entityName, String entityType, String typeName) { + + @Override + public String toString() { + return entityName + "/" + typeName; + } + } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java 
b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java index c3a9e393..921b6ee5 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java @@ -4,4 +4,10 @@ @Builder public record TagTopicInfo(String entityName, String entityType, String typeName, String entityStatus) { + + @Override + public String toString() { + return entityName + "/" + typeName; + } + } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index a867c16c..e9fd43b1 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -13,7 +13,6 @@ import jakarta.inject.Singleton; import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.clients.admin.*; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigResource; import java.time.Instant; @@ -24,7 +23,6 @@ import java.util.concurrent.TimeoutException; import java.util.function.Function; import java.util.stream.Collectors; -import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.AlterConfigsResult; @@ -35,11 +33,6 @@ import org.apache.kafka.clients.admin.RecordsToDelete; import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.admin.TopicListing; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.config.ConfigResource; - -import static com.michelin.ns4kafka.utils.config.ClusterConfig.CLUSTER_ID; -import static com.michelin.ns4kafka.utils.tags.TagsUtils.TOPIC_ENTITY_TYPE; /** * Topic executor. 
@@ -49,6 +42,10 @@ @Singleton @AllArgsConstructor public class TopicAsyncExecutor { + + public static final String CLUSTER_ID = "cluster.id"; + public static final String TOPIC_ENTITY_TYPE = "kafka_topic"; + private final ManagedClusterProperties managedClusterProperties; @Inject @@ -85,43 +82,43 @@ public void synchronizeTopics() { List ns4kafkaTopics = topicRepository.findAllForCluster(managedClusterProperties.getName()); List toCreate = ns4kafkaTopics.stream() - .filter(topic -> !brokerTopics.containsKey(topic.getMetadata().getName())) - .toList(); + .filter(topic -> !brokerTopics.containsKey(topic.getMetadata().getName())) + .toList(); List toCheckConf = ns4kafkaTopics.stream() - .filter(topic -> brokerTopics.containsKey(topic.getMetadata().getName())) - .toList(); + .filter(topic -> brokerTopics.containsKey(topic.getMetadata().getName())) + .toList(); Map> toUpdate = toCheckConf.stream() - .map(topic -> { - Map actualConf = - brokerTopics.get(topic.getMetadata().getName()).getSpec().getConfigs(); - Map expectedConf = - topic.getSpec().getConfigs() == null ? Map.of() : topic.getSpec().getConfigs(); - Collection topicConfigChanges = computeConfigChanges(expectedConf, actualConf); - if (!topicConfigChanges.isEmpty()) { - ConfigResource cr = - new ConfigResource(ConfigResource.Type.TOPIC, topic.getMetadata().getName()); - return Map.entry(cr, topicConfigChanges); - } - return null; - }) - .filter(Objects::nonNull) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .map(topic -> { + Map actualConf = + brokerTopics.get(topic.getMetadata().getName()).getSpec().getConfigs(); + Map expectedConf = + topic.getSpec().getConfigs() == null ? 
Map.of() : topic.getSpec().getConfigs(); + Collection topicConfigChanges = computeConfigChanges(expectedConf, actualConf); + if (!topicConfigChanges.isEmpty()) { + ConfigResource cr = + new ConfigResource(ConfigResource.Type.TOPIC, topic.getMetadata().getName()); + return Map.entry(cr, topicConfigChanges); + } + return null; + }) + .filter(Objects::nonNull) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); if (!toCreate.isEmpty()) { log.debug("Topic(s) to create: " - + String.join(",", toCreate.stream().map(topic -> topic.getMetadata().getName()).toList())); + + String.join(",", toCreate.stream().map(topic -> topic.getMetadata().getName()).toList())); } if (!toUpdate.isEmpty()) { log.debug("Topic(s) to update: " - + String.join(",", toUpdate.keySet().stream().map(ConfigResource::name).toList())); + + String.join(",", toUpdate.keySet().stream().map(ConfigResource::name).toList())); for (Map.Entry> e : toUpdate.entrySet()) { for (AlterConfigOp op : e.getValue()) { log.debug( - e.getKey().name() + " " + op.opType().toString() + " " + op.configEntry().name() + "(" - + op.configEntry().value() + ")"); + e.getKey().name() + " " + op.opType().toString() + " " + op.configEntry().name() + "(" + + op.configEntry().value() + ")"); } } } @@ -129,9 +126,10 @@ public void synchronizeTopics() { createTopics(toCreate); alterTopics(toUpdate, toCheckConf); - createTags(ns4kafkaTopics, brokerTopics); - deleteTags(ns4kafkaTopics, brokerTopics); - + if (managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { + createTags(ns4kafkaTopics, brokerTopics); + deleteTags(ns4kafkaTopics, brokerTopics); + } } catch (ExecutionException | TimeoutException | CancellationException | KafkaStoreException e) { log.error("Error", e); } catch (InterruptedException e) { @@ -159,8 +157,14 @@ public void createTags(List ns4kafkaTopics, Map brokerTopi .build()); }).toList(); - if(!tagsToCreate.isEmpty()) { - 
schemaRegistryClient.addTags(managedClusterProperties.getName(), tagsToCreate).block(); + if (!tagsToCreate.isEmpty()) { + String stringTags = String.join(",", tagsToCreate + .stream() + .map(Record::toString) + .toList()); + schemaRegistryClient.addTags(managedClusterProperties.getName(), tagsToCreate) + .subscribe(success -> log.debug(String.format("Success creating tag %s.", stringTags)), + error -> log.error(String.format("Error creating tag %s.", stringTags))); } } @@ -170,20 +174,26 @@ public void createTags(List ns4kafkaTopics, Map brokerTopi * @param brokerTopics Topics from broker */ public void deleteTags(List ns4kafkaTopics, Map brokerTopics) { - - List tagsToDelete = brokerTopics.values().stream().flatMap(brokerTopic -> { - Optional newTopic = ns4kafkaTopics.stream() - .filter(ns4kafkaTopic -> ns4kafkaTopic.getMetadata().getName().equals(brokerTopic.getMetadata().getName())) - .findFirst(); - List newTags = newTopic.isPresent() && newTopic.get().getSpec().getTags() != null ? newTopic.get().getSpec().getTags() : Collections.emptyList(); - List existingTags = brokerTopic.getSpec().getTags() != null ? brokerTopic.getSpec().getTags() : Collections.emptyList(); - - return existingTags.stream().filter(tag -> !newTags.contains(tag)).map(tag -> TagTopicInfo.builder() - .entityName(managedClusterProperties.getConfig().getProperty(CLUSTER_ID)+":"+brokerTopic.getMetadata().getName()) - .typeName(tag) - .entityType(TOPIC_ENTITY_TYPE) - .build()); - }).toList(); + List tagsToDelete = brokerTopics + .values() + .stream() + .flatMap(brokerTopic -> { + Optional newTopic = ns4kafkaTopics + .stream() + .filter(ns4kafkaTopic -> ns4kafkaTopic.getMetadata().getName().equals(brokerTopic.getMetadata().getName())) + .findFirst(); + + Set existingTags = new HashSet<>(brokerTopic.getSpec().getTags()); + Set newTags = newTopic.isPresent() ? 
new HashSet<>(newTopic.get().getSpec().getTags()) : Collections.emptySet(); + existingTags.removeAll(newTags); + return existingTags + .stream() + .map(tag -> TagTopicInfo.builder() + .entityName(managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + brokerTopic.getMetadata().getName()) + .typeName(tag) + .entityType(TOPIC_ENTITY_TYPE) + .build()); + }).toList(); tagsToDelete.forEach(tag -> schemaRegistryClient.deleteTag(managedClusterProperties.getName(), tag.entityName(), tag.typeName()).block()); } @@ -221,16 +231,15 @@ public List listBrokerTopicNames() throws InterruptedException, Executio } /** - * Complete topics with confluent tags + * Enrich topics with confluent tags * @param topics Topics to complete */ - public void completeWithTags(Map topics) { + public void enrichWithTags(Map topics) { if(managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { - topics.entrySet().stream() - .forEach(entry -> - entry.getValue().getSpec().setTags(schemaRegistryClient.getTopicWithTags(managedClusterProperties.getName(), - managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + entry.getValue().getMetadata().getName()) - .block().stream().map(TagTopicInfo::typeName).toList())); + topics.forEach((key,value) -> + value.getSpec().setTags(schemaRegistryClient.getTopicWithTags(managedClusterProperties.getName(), + managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + value.getMetadata().getName()) + .block().stream().map(TagTopicInfo::typeName).toList())); } } @@ -279,8 +288,8 @@ public Map collectBrokerTopicsFromNames(List topicNames) .build() ) .collect(Collectors.toMap(topic -> topic.getMetadata().getName(), Function.identity())); - - completeWithTags(topics); + + enrichWithTags(topics); return topics; } diff --git a/src/main/java/com/michelin/ns4kafka/utils/config/ClusterConfig.java b/src/main/java/com/michelin/ns4kafka/utils/config/ClusterConfig.java deleted file mode 100644 index 
d05e79fd..00000000 --- a/src/main/java/com/michelin/ns4kafka/utils/config/ClusterConfig.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.michelin.ns4kafka.utils.config; - -public final class ClusterConfig { - - public static final String CLUSTER_ID = "cluster.id"; - - private ClusterConfig() { - } -} diff --git a/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java b/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java deleted file mode 100644 index a75e8f23..00000000 --- a/src/main/java/com/michelin/ns4kafka/utils/tags/TagsUtils.java +++ /dev/null @@ -1,8 +0,0 @@ -package com.michelin.ns4kafka.utils.tags; - -public final class TagsUtils { - public static final String TOPIC_ENTITY_TYPE = "kafka_topic"; - - private TagsUtils() { - } -} diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java b/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java index 5dea72d5..f91f2486 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java @@ -27,4 +27,14 @@ public MutableConvertibleValues getAttributes() { public Optional getBody() { return Optional.empty(); } + + @Override + public String reason() { + return null; + } + + @Override + public int code() { + return 0; + } } diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index 956647e0..6b92d0f7 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -1,8 +1,8 @@ package com.michelin.ns4kafka.services.executors; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; +import 
com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; @@ -11,13 +11,12 @@ import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import org.testcontainers.shaded.org.hamcrest.Matchers; import reactor.core.publisher.Mono; import java.util.*; -import static com.michelin.ns4kafka.utils.config.ClusterConfig.CLUSTER_ID; -import static com.michelin.ns4kafka.utils.tags.TagsUtils.TOPIC_ENTITY_TYPE; +import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.CLUSTER_ID; +import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.TOPIC_ENTITY_TYPE; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.mockito.ArgumentMatchers.anyList; @@ -36,7 +35,7 @@ class TopicAsyncExecutorTest { SchemaRegistryClient schemaRegistryClient; @Mock - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; + ManagedClusterProperties managedClusterProperties; @InjectMocks TopicAsyncExecutor topicAsyncExecutor; @@ -45,11 +44,11 @@ class TopicAsyncExecutorTest { void createTagsShouldAddTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); - kafkaAsyncExecutorConfig.setConfig(properties); + managedClusterProperties.setConfig(properties); when(schemaRegistryClient.addTags(anyString(), anyList())).thenReturn(Mono.just(new ArrayList<>())); - when(kafkaAsyncExecutorConfig.getConfig()).thenReturn(properties); - when(kafkaAsyncExecutorConfig.getName()).thenReturn(LOCAL_CLUSTER); + when(managedClusterProperties.getConfig()).thenReturn(properties); + when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -79,7 +78,7 @@ 
void createTagsShouldAddTags() { void createTagsShouldNotAddTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); - kafkaAsyncExecutorConfig.setConfig(properties); + managedClusterProperties.setConfig(properties); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -106,11 +105,11 @@ void createTagsShouldNotAddTags() { void deleteTagsShouldDeleteTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); - kafkaAsyncExecutorConfig.setConfig(properties); + managedClusterProperties.setConfig(properties); when(schemaRegistryClient.deleteTag(anyString(),anyString(),anyString())).thenReturn(Mono.just(new HttpResponseMock())); - when(kafkaAsyncExecutorConfig.getConfig()).thenReturn(properties); - when(kafkaAsyncExecutorConfig.getName()).thenReturn(LOCAL_CLUSTER); + when(managedClusterProperties.getConfig()).thenReturn(properties); + when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -137,7 +136,7 @@ void deleteTagsShouldDeleteTags() { void deleteTagsShouldNotDeleteTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); - kafkaAsyncExecutorConfig.setConfig(properties); + managedClusterProperties.setConfig(properties); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -164,14 +163,14 @@ void deleteTagsShouldNotDeleteTags() { void completeWithTagsShouldComplete() { Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); - kafkaAsyncExecutorConfig.setConfig(properties); + managedClusterProperties.setConfig(properties); TagTopicInfo tagTopicInfo = TagTopicInfo.builder().typeName(TAG1).build(); when(schemaRegistryClient.getTopicWithTags(anyString(),anyString())).thenReturn(Mono.just(List.of(tagTopicInfo))); - when(kafkaAsyncExecutorConfig.getConfig()).thenReturn(properties); - 
when(kafkaAsyncExecutorConfig.getName()).thenReturn(LOCAL_CLUSTER); - when(kafkaAsyncExecutorConfig.getProvider()).thenReturn(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD); + when(managedClusterProperties.getConfig()).thenReturn(properties); + when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); Map brokerTopics = new HashMap<>(); Topic brokerTopic = Topic.builder() @@ -180,7 +179,7 @@ void completeWithTagsShouldComplete() { .spec(Topic.TopicSpec.builder().build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); - topicAsyncExecutor.completeWithTags(brokerTopics); + topicAsyncExecutor.enrichWithTags(brokerTopics); assertEquals(TAG1,brokerTopics.get(TOPIC_NAME).getSpec().getTags().get(0)); } @@ -189,9 +188,9 @@ void completeWithTagsShouldComplete() { void completeWithTagsShouldNotComplete() { Properties properties = new Properties(); properties.put(CLUSTER_ID,CLUSTER_ID_TEST); - kafkaAsyncExecutorConfig.setConfig(properties); + managedClusterProperties.setConfig(properties); - when(kafkaAsyncExecutorConfig.getProvider()).thenReturn(KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED); + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.SELF_MANAGED); Map brokerTopics = new HashMap<>(); Topic brokerTopic = Topic.builder() @@ -200,7 +199,7 @@ void completeWithTagsShouldNotComplete() { .spec(Topic.TopicSpec.builder().build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); - topicAsyncExecutor.completeWithTags(brokerTopics); + topicAsyncExecutor.enrichWithTags(brokerTopics); assertNull(brokerTopics.get(TOPIC_NAME).getSpec().getTags()); } From ea6c41ea92f3b573eae0a6b6954e63d0dac1a70d Mon Sep 17 00:00:00 2001 From: E046899 Date: Tue, 3 Oct 2023 14:46:55 +0200 Subject: [PATCH 12/27] Manage Confluent tags --- .../controllers/topic/TopicController.java | 13 +++- 
.../ns4kafka/services/TopicService.java | 29 +++++--- .../clients/schema/SchemaRegistryClient.java | 48 ++++++++++--- .../clients/schema/entities/TagInfo.java | 5 ++ .../clients/schema/entities/TagSpecs.java | 9 ++- .../clients/schema/entities/TagTopicInfo.java | 8 +++ .../executors/TopicAsyncExecutor.java | 72 ++++++++++++++----- .../controllers/TopicControllerTest.java | 26 +++---- .../ns4kafka/services/TopicServiceTest.java | 20 ++++-- .../services/executors/HttpResponseMock.java | 4 +- .../executors/TagSpecsArgumentMatcher.java | 14 ++-- .../executors/TopicAsyncExecutorTest.java | 58 +++++++++------ 12 files changed, 213 insertions(+), 93 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java index 89143b5a..bea5fd6a 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java @@ -21,7 +21,12 @@ import jakarta.inject.Inject; import jakarta.validation.Valid; import java.time.Instant; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; @@ -104,8 +109,10 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, validationErrors.addAll(topicService.validateTopicUpdate(ns, existingTopic.get(), topic)); } - List existingTags = existingTopic.isPresent() && existingTopic.get().getSpec().getTags() != null ? existingTopic.get().getSpec().getTags() : Collections.emptyList(); - if(topic.getSpec().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { + List existingTags = existingTopic.isPresent() && existingTopic.get().getSpec().getTags() != null + ? 
existingTopic.get().getSpec().getTags() + : Collections.emptyList(); + if (topic.getSpec().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { validationErrors.addAll(topicService.validateTags(ns, topic)); } diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index e7a30b61..d28ab3f5 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -16,8 +16,12 @@ import io.micronaut.inject.qualifiers.Qualifiers; import jakarta.inject.Inject; import jakarta.inject.Singleton; - -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; @@ -322,7 +326,8 @@ public Map deleteRecords(Topic topic, Map validateTags(Namespace namespace, Topic topic) { .filter(cluster -> namespace.getMetadata().getCluster().equals(cluster.getName())) .findFirst(); - if(topicCluster.isPresent() && !topicCluster.get().getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { - validationErrors.add(String.format("Invalid value (%s) for tags: Tags are not currently supported.", String.join(",", topic.getSpec().getTags()))); + if (topicCluster.isPresent() + && !topicCluster.get().getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { + validationErrors.add(String.format( + "Invalid value (%s) for tags: Tags are not currently supported.", + String.join(",", topic.getSpec().getTags()))); return validationErrors; } Set tagNames = schemaRegistryClient.getTags(namespace.getMetadata().getCluster()) .map(tags -> tags.stream().map(TagInfo::name).collect(Collectors.toSet())).block(); - if(tagNames == null || 
tagNames.isEmpty()) { - validationErrors.add(String.format("Invalid value (%s) for tags: No tags allowed.", String.join(",", topic.getSpec().getTags()))); + if (tagNames == null || tagNames.isEmpty()) { + validationErrors.add(String.format( + "Invalid value (%s) for tags: No tags allowed.", + String.join(",", topic.getSpec().getTags()))); return validationErrors; } - if(!tagNames.containsAll(topic.getSpec().getTags())) { - validationErrors.add(String.format("Invalid value (%s) for tags: Available tags are (%s).", + if (!tagNames.containsAll(topic.getSpec().getTags())) { + validationErrors.add(String.format( + "Invalid value (%s) for tags: Available tags are (%s).", String.join(",", topic.getSpec().getTags()), String.join(",", tagNames))); } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java index 5685c7e0..41a4e20a 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java @@ -1,7 +1,14 @@ package com.michelin.ns4kafka.services.clients.schema; import com.michelin.ns4kafka.properties.ManagedClusterProperties; -import com.michelin.ns4kafka.services.clients.schema.entities.*; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityCheckResponse; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityRequest; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityResponse; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaRequest; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaResponse; +import com.michelin.ns4kafka.services.clients.schema.entities.TagInfo; +import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; +import 
com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.core.type.Argument; import io.micronaut.core.util.StringUtils; @@ -170,45 +177,61 @@ public Mono deleteCurrentCompatibilityBySubject(Str } /** - * List tags + * List tags. + * * @param kafkaCluster The Kafka cluster * @return A list of tags */ public Mono> getTags(String kafkaCluster) { ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); - HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/types/tagdefs"))) + HttpRequest request = HttpRequest + .GET( + URI.create(StringUtils.prependUri( + config.getUrl(), + "/catalog/v1/types/tagdefs"))) .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagInfo.class))); } /** - * List tags of a topic + * List tags of a topic. + * * @param kafkaCluster The Kafka cluster * @param entityName The topic's name for the API * @return A list of tags */ public Mono> getTopicWithTags(String kafkaCluster, String entityName) { ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); - HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags"))) + HttpRequest request = HttpRequest + .GET( + URI.create(StringUtils.prependUri( + config.getUrl(), + "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags"))) .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); } /** - * Add a tag to a topic + * Add a tag to a topic. 
+ * * @param kafkaCluster The Kafka cluster * @param tagSpecs Tags to add * @return Information about added tags */ public Mono> addTags(String kafkaCluster, List tagSpecs) { ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); - HttpRequest request = HttpRequest.POST(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/tags")), tagSpecs) + HttpRequest request = HttpRequest + .POST( + URI.create(StringUtils.prependUri( + config.getUrl(), + "/catalog/v1/entity/tags")), tagSpecs) .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); } /** - * Delete a tag to a topic + * Delete a tag to a topic. + * * @param kafkaCluster The Kafka cluster * @param entityName The topic's name * @param tagName The tag to delete @@ -216,13 +239,18 @@ public Mono> addTags(String kafkaCluster, List tagS */ public Mono> deleteTag(String kafkaCluster, String entityName, String tagName) { ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); - HttpRequest request = HttpRequest.DELETE(URI.create(StringUtils.prependUri(config.getUrl(), "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags/" + tagName))) + HttpRequest request = HttpRequest + .DELETE( + URI.create(StringUtils.prependUri( + config.getUrl(), + "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags/" + tagName))) .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.exchange(request, Void.class)); } /** - * Get the schema registry of the given Kafka cluster + * Get the schema registry of the given Kafka cluster. 
+ * * @param kafkaCluster The Kafka cluster * @return The schema registry configuration */ diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java index 1391b78a..679cd5d3 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagInfo.java @@ -2,6 +2,11 @@ import lombok.Builder; +/** + * Tag name. + * + * @param name Tag name + */ @Builder public record TagInfo(String name) { } \ No newline at end of file diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java index 865fd961..42bcc36b 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java @@ -2,6 +2,13 @@ import lombok.Builder; +/** + * Tag Specs to call schema registry API. + * + * @param entityName The entity name + * @param entityType The entity type + * @param typeName The type name + */ @Builder public record TagSpecs(String entityName, String entityType, String typeName) { @@ -9,5 +16,5 @@ public record TagSpecs(String entityName, String entityType, String typeName) { public String toString() { return entityName + "/" + typeName; } - + } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java index 921b6ee5..fd5666ae 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java @@ -2,6 +2,14 @@ import lombok.Builder; +/** + * Information on tag. 
+ * + * @param entityName The entity name + * @param entityType The entity type + * @param typeName The type name + * @param entityStatus The entity status + */ @Builder public record TagTopicInfo(String entityName, String entityType, String typeName, String entityStatus) { diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index e9fd43b1..9e9c2f83 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -11,18 +11,25 @@ import io.micronaut.context.annotation.EachBean; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.AllArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.config.ConfigResource; import java.time.Instant; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Function; import java.util.stream.Collectors; +import lombok.AllArgsConstructor; +import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.AlterConfigsResult; @@ -33,6 +40,8 @@ import org.apache.kafka.clients.admin.RecordsToDelete; import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.admin.TopicListing; +import org.apache.kafka.common.TopicPartition; +import 
org.apache.kafka.common.config.ConfigResource; /** * Topic executor. @@ -139,7 +148,8 @@ public void synchronizeTopics() { } /** - * Create tags + * Create tags. + * * @param ns4kafkaTopics Topics from ns4kafka * @param brokerTopics Topics from broker */ @@ -147,11 +157,18 @@ public void createTags(List ns4kafkaTopics, Map brokerTopi List tagsToCreate = ns4kafkaTopics.stream().flatMap(ns4kafkaTopic -> { Topic brokerTopic = brokerTopics.get(ns4kafkaTopic.getMetadata().getName()); - List existingTags = brokerTopic != null && brokerTopic.getSpec().getTags() != null ? brokerTopic.getSpec().getTags() : Collections.emptyList(); - List newTags = ns4kafkaTopic.getSpec().getTags() != null ? ns4kafkaTopic.getSpec().getTags() : Collections.emptyList(); + List existingTags = brokerTopic != null && brokerTopic.getSpec().getTags() != null + ? brokerTopic.getSpec().getTags() + : Collections.emptyList(); + List newTags = ns4kafkaTopic.getSpec().getTags() != null + ? ns4kafkaTopic.getSpec().getTags() + : Collections.emptyList(); return newTags.stream().filter(tag -> !existingTags.contains(tag)).map(tag -> TagSpecs.builder() - .entityName(managedClusterProperties.getConfig().getProperty(CLUSTER_ID)+":"+ns4kafkaTopic.getMetadata().getName()) + .entityName( + managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + + ":" + + ns4kafkaTopic.getMetadata().getName()) .typeName(tag) .entityType(TOPIC_ENTITY_TYPE) .build()); @@ -169,7 +186,8 @@ public void createTags(List ns4kafkaTopics, Map brokerTopi } /** - * Delete tags + * Delete tags. 
+ * * @param ns4kafkaTopics Topics from ns4kafka * @param brokerTopics Topics from broker */ @@ -180,26 +198,38 @@ public void deleteTags(List ns4kafkaTopics, Map brokerTopi .flatMap(brokerTopic -> { Optional newTopic = ns4kafkaTopics .stream() - .filter(ns4kafkaTopic -> ns4kafkaTopic.getMetadata().getName().equals(brokerTopic.getMetadata().getName())) + .filter(ns4kafkaTopic -> ns4kafkaTopic + .getMetadata() + .getName() + .equals(brokerTopic.getMetadata().getName())) .findFirst(); Set existingTags = new HashSet<>(brokerTopic.getSpec().getTags()); - Set newTags = newTopic.isPresent() ? new HashSet<>(newTopic.get().getSpec().getTags()) : Collections.emptySet(); + Set newTags = newTopic.isPresent() + ? new HashSet<>(newTopic.get().getSpec().getTags()) + : Collections.emptySet(); existingTags.removeAll(newTags); return existingTags .stream() .map(tag -> TagTopicInfo.builder() - .entityName(managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + brokerTopic.getMetadata().getName()) + .entityName(managedClusterProperties + .getConfig() + .getProperty(CLUSTER_ID) + ":" + brokerTopic.getMetadata().getName()) .typeName(tag) .entityType(TOPIC_ENTITY_TYPE) .build()); }).toList(); - tagsToDelete.forEach(tag -> schemaRegistryClient.deleteTag(managedClusterProperties.getName(), tag.entityName(), tag.typeName()).block()); + tagsToDelete.forEach(tag -> + schemaRegistryClient.deleteTag( + managedClusterProperties.getName(), + tag.entityName(), + tag.typeName()).block()); } /** - * Delete a topic + * Delete a topic. + * * @param topic The topic to delete */ public void deleteTopic(Topic topic) throws InterruptedException, ExecutionException, TimeoutException { @@ -231,14 +261,18 @@ public List listBrokerTopicNames() throws InterruptedException, Executio } /** - * Enrich topics with confluent tags + * Enrich topics with confluent tags. 
+ * * @param topics Topics to complete */ public void enrichWithTags(Map topics) { - if(managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { - topics.forEach((key,value) -> + if (managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { + topics.forEach((key, value) -> value.getSpec().setTags(schemaRegistryClient.getTopicWithTags(managedClusterProperties.getName(), - managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + value.getMetadata().getName()) + managedClusterProperties + .getConfig() + .getProperty(CLUSTER_ID) + + ":" + value.getMetadata().getName()) .block().stream().map(TagTopicInfo::typeName).toList())); } } diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index 7887eea3..bc2fe34d 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -29,6 +29,7 @@ import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.security.utils.SecurityService; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Optional; @@ -43,15 +44,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.*; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; - - @ExtendWith(MockitoExtension.class) class TopicControllerTest { @Mock @@ -361,7 +353,8 @@ void updateTopic() throws InterruptedException, ExecutionException, TimeoutExcep } /** - * Validate topic update with two new tags + * Validate topic update with two new tags. 
+ * * @throws InterruptedException Any interrupted exception * @throws ExecutionException Any execution exception * @throws TimeoutException Any timeout exception @@ -385,7 +378,7 @@ void updateTopicWithNewTags() throws InterruptedException, ExecutionException, T .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .configs(Map.of("cleanup.policy","compact", + .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) .build()) @@ -399,7 +392,7 @@ void updateTopicWithNewTags() throws InterruptedException, ExecutionException, T .tags(Arrays.asList("TAG1", "TAG2")) .replicationFactor(3) .partitions(3) - .configs(Map.of("cleanup.policy","delete", + .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) .build()) @@ -423,7 +416,8 @@ void updateTopicWithNewTags() throws InterruptedException, ExecutionException, T } /** - * Validate topic update with a tag to delete + * Validate topic update with a tag to delete. 
+ * * @throws InterruptedException Any interrupted exception * @throws ExecutionException Any execution exception * @throws TimeoutException Any timeout exception @@ -448,7 +442,7 @@ void updateTopicWithTagToDelete() throws InterruptedException, ExecutionExceptio .tags(Arrays.asList("TAG1", "TAG2")) .replicationFactor(3) .partitions(3) - .configs(Map.of("cleanup.policy","compact", + .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) .build()) @@ -462,7 +456,7 @@ void updateTopicWithTagToDelete() throws InterruptedException, ExecutionExceptio .tags(List.of("TAG1")) .replicationFactor(3) .partitions(3) - .configs(Map.of("cleanup.policy","delete", + .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) .build()) @@ -485,7 +479,7 @@ void updateTopicWithTagToDelete() throws InterruptedException, ExecutionExceptio } /** - * Validate topic update when there are validations errors + * Validate topic update when there are validations errors. */ @Test void updateTopicValidationErrors() { diff --git a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index a0168da3..3d8c97fa 100644 --- a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -56,7 +56,7 @@ class TopicServiceTest { SchemaRegistryClient schemaRegistryClient; /** - * Validate find topic by name + * Validate find topic by name. 
*/ @Test void findByName() { @@ -901,7 +901,8 @@ void shouldTagsBeValid() { List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); - when(managedClusterProperties.stream()).thenReturn(Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); + when(managedClusterProperties.stream()).thenReturn(Stream.of( + new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(tagInfo)); List validationErrors = topicService.validateTags(ns, topic); @@ -923,7 +924,8 @@ void shouldTagsBeInvalidWhenNotConfluentCloud() { .tags(List.of("TAG_TEST")).build()) .build(); - when(managedClusterProperties.stream()).thenReturn(Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.SELF_MANAGED))); + when(managedClusterProperties.stream()).thenReturn(Stream.of( + new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.SELF_MANAGED))); List validationErrors = topicService.validateTags(ns, topic); assertEquals(1, validationErrors.size()); @@ -945,12 +947,15 @@ void shouldTagsBeInvalidWhenNoTagsAllowed() { .tags(List.of("TAG_TEST")).build()) .build(); - when(managedClusterProperties.stream()).thenReturn(Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); + when(managedClusterProperties.stream()).thenReturn(Stream.of( + new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(Collections.emptyList())); List validationErrors = topicService.validateTags(ns, topic); assertEquals(1, validationErrors.size()); - assertEquals("Invalid value (TAG_TEST) for tags: No tags defined on the kafka cluster.", validationErrors.get(0)); + assertEquals( + "Invalid value (TAG_TEST) for tags: No tags defined on the kafka cluster.", + 
validationErrors.get(0)); } @Test @@ -970,7 +975,10 @@ void shouldTagsBeInvalidWhenNotAllowed() { List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); - when(managedClusterProperties.stream()).thenReturn(Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); + when(managedClusterProperties.stream()) + .thenReturn(Stream.of( + new ManagedClusterProperties("local", + ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(tagInfo)); List validationErrors = topicService.validateTags(ns, topic); diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java b/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java index f91f2486..bfe7841f 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java @@ -4,9 +4,11 @@ import io.micronaut.http.HttpHeaders; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; - import java.util.Optional; +/** + * Class to Mock Http Response. + */ public class HttpResponseMock implements HttpResponse { @Override public HttpStatus getStatus() { diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java b/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java index 1260b319..e734b3f3 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java @@ -1,10 +1,12 @@ package com.michelin.ns4kafka.services.executors; import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; -import org.mockito.ArgumentMatcher; - import java.util.List; +import org.mockito.ArgumentMatcher; +/** + * Matcher for TagSpecs. 
+ */ public class TagSpecsArgumentMatcher implements ArgumentMatcher> { private List left; @@ -15,11 +17,11 @@ public TagSpecsArgumentMatcher(List tagSpecsList) { @Override public boolean matches(List right) { - if(left.size() != right.size()) { + if (left.size() != right.size()) { return false; } - return left.get(0).entityName().equals(right.get(0).entityName()) && - left.get(0).entityType().equals(right.get(0).entityType()) && - left.get(0).typeName().equals(right.get(0).typeName()); + return left.get(0).entityName().equals(right.get(0).entityName()) + && left.get(0).entityType().equals(right.get(0).entityType()) + && left.get(0).typeName().equals(right.get(0).typeName()); } } diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index 6b92d0f7..ef7ab7c7 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -1,11 +1,28 @@ package com.michelin.ns4kafka.services.executors; +import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.CLUSTER_ID; +import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.TOPIC_ENTITY_TYPE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.argThat; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; import 
com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; @@ -13,15 +30,6 @@ import org.mockito.junit.jupiter.MockitoExtension; import reactor.core.publisher.Mono; -import java.util.*; - -import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.CLUSTER_ID; -import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.TOPIC_ENTITY_TYPE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.Mockito.*; - @ExtendWith(MockitoExtension.class) class TopicAsyncExecutorTest { @@ -69,15 +77,18 @@ void createTagsShouldAddTags() { topicAsyncExecutor.createTags(ns4kafkaTopics, brokerTopics); List tagSpecsList = new ArrayList<>(); - TagSpecs tagSpecs = TagSpecs.builder().typeName(TAG1).entityName(CLUSTER_ID_TEST+":"+TOPIC_NAME).entityType(TOPIC_ENTITY_TYPE).build(); + TagSpecs tagSpecs = TagSpecs.builder().typeName(TAG1) + .entityName(CLUSTER_ID_TEST + ":" + TOPIC_NAME) + .entityType(TOPIC_ENTITY_TYPE).build(); tagSpecsList.add(tagSpecs); - verify(schemaRegistryClient, times(1)).addTags(eq(LOCAL_CLUSTER), argThat(new TagSpecsArgumentMatcher(tagSpecsList))); + verify(schemaRegistryClient, times(1)) + .addTags(eq(LOCAL_CLUSTER), argThat(new TagSpecsArgumentMatcher(tagSpecsList))); } @Test void createTagsShouldNotAddTags() { Properties properties = new Properties(); - properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); List ns4kafkaTopics = new ArrayList<>(); @@ -104,10 +115,11 @@ void createTagsShouldNotAddTags() { @Test 
void deleteTagsShouldDeleteTags() { Properties properties = new Properties(); - properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); - when(schemaRegistryClient.deleteTag(anyString(),anyString(),anyString())).thenReturn(Mono.just(new HttpResponseMock())); + when(schemaRegistryClient.deleteTag(anyString(), anyString(), anyString())) + .thenReturn(Mono.just(new HttpResponseMock())); when(managedClusterProperties.getConfig()).thenReturn(properties); when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); @@ -124,18 +136,19 @@ void deleteTagsShouldDeleteTags() { .metadata(ObjectMeta.builder() .name(TOPIC_NAME).build()) .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG1,TAG2)).build()).build(); + .tags(List.of(TAG1, TAG2)).build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); - verify(schemaRegistryClient, times(1)).deleteTag(eq(LOCAL_CLUSTER),eq(CLUSTER_ID_TEST+":"+TOPIC_NAME),eq(TAG1)); + verify(schemaRegistryClient, times(1)) + .deleteTag(eq(LOCAL_CLUSTER), eq(CLUSTER_ID_TEST + ":" + TOPIC_NAME), eq(TAG1)); } @Test void deleteTagsShouldNotDeleteTags() { Properties properties = new Properties(); - properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); List ns4kafkaTopics = new ArrayList<>(); @@ -156,18 +169,19 @@ void deleteTagsShouldNotDeleteTags() { topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); - verify(schemaRegistryClient, times(0)).deleteTag(anyString(),anyString(),anyString()); + verify(schemaRegistryClient, times(0)).deleteTag(anyString(), anyString(), anyString()); } @Test void completeWithTagsShouldComplete() { Properties properties = new Properties(); - properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); 
TagTopicInfo tagTopicInfo = TagTopicInfo.builder().typeName(TAG1).build(); - when(schemaRegistryClient.getTopicWithTags(anyString(),anyString())).thenReturn(Mono.just(List.of(tagTopicInfo))); + when(schemaRegistryClient.getTopicWithTags(anyString(), anyString())) + .thenReturn(Mono.just(List.of(tagTopicInfo))); when(managedClusterProperties.getConfig()).thenReturn(properties); when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); @@ -181,13 +195,13 @@ void completeWithTagsShouldComplete() { topicAsyncExecutor.enrichWithTags(brokerTopics); - assertEquals(TAG1,brokerTopics.get(TOPIC_NAME).getSpec().getTags().get(0)); + assertEquals(TAG1, brokerTopics.get(TOPIC_NAME).getSpec().getTags().get(0)); } @Test void completeWithTagsShouldNotComplete() { Properties properties = new Properties(); - properties.put(CLUSTER_ID,CLUSTER_ID_TEST); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.SELF_MANAGED); From b8b367723724ef7291cde7a9d908493237f1f0bd Mon Sep 17 00:00:00 2001 From: E046899 Date: Tue, 3 Oct 2023 15:32:00 +0200 Subject: [PATCH 13/27] Manage Confluent tags --- .../ns4kafka/controllers/TopicControllerTest.java | 15 +++++++++++++++ .../ns4kafka/services/TopicServiceTest.java | 6 +++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index bc2fe34d..c07d3f0e 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -30,6 +30,7 @@ import io.micronaut.http.HttpStatus; import io.micronaut.security.utils.SecurityService; import 
java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; @@ -234,6 +235,7 @@ void createNewTopic() throws InterruptedException, ExecutionException, TimeoutEx .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -277,6 +279,7 @@ void shouldCreateNewTopicWithNoConstraint() throws InterruptedException, Executi .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -319,6 +322,7 @@ void updateTopic() throws InterruptedException, ExecutionException, TimeoutExcep .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -332,6 +336,7 @@ void updateTopic() throws InterruptedException, ExecutionException, TimeoutExcep .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -500,6 +505,7 @@ void updateTopicValidationErrors() { .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -513,6 +519,7 @@ void updateTopicValidationErrors() { .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(6) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -553,6 +560,7 @@ void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionE .spec(Topic.TopicSpec.builder() .replicationFactor(3) 
.partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -566,6 +574,7 @@ void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionE .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -602,6 +611,7 @@ void createNewTopicDryRun() throws InterruptedException, ExecutionException, Tim .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -636,6 +646,7 @@ void createNewTopicFailValidation() { .spec(Topic.TopicSpec.builder() .replicationFactor(1) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -671,6 +682,7 @@ void shouldNotFailWhenCreatingNewTopicWithNoValidator() .spec(Topic.TopicSpec.builder() .replicationFactor(1) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -706,6 +718,7 @@ void shouldNotFailWhenCreatingNewTopicWithNoValidationConstraint() .spec(Topic.TopicSpec.builder() .replicationFactor(1) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -740,6 +753,7 @@ void createNewTopicFailQuotaValidation() { .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -1093,6 +1107,7 @@ void createCollidingTopic() throws InterruptedException, ExecutionException, Tim .spec(Topic.TopicSpec.builder() .replicationFactor(3) 
.partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) diff --git a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index 3d8c97fa..627e1d44 100644 --- a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -929,7 +929,7 @@ void shouldTagsBeInvalidWhenNotConfluentCloud() { List validationErrors = topicService.validateTags(ns, topic); assertEquals(1, validationErrors.size()); - assertEquals("Tags can only be used on confluent clusters.", validationErrors.get(0)); + assertEquals("Invalid value (TAG_TEST) for tags: Tags are not currently supported.", validationErrors.get(0)); } @Test @@ -954,7 +954,7 @@ void shouldTagsBeInvalidWhenNoTagsAllowed() { List validationErrors = topicService.validateTags(ns, topic); assertEquals(1, validationErrors.size()); assertEquals( - "Invalid value (TAG_TEST) for tags: No tags defined on the kafka cluster.", + "Invalid value (TAG_TEST) for tags: No tags allowed.", validationErrors.get(0)); } @@ -970,7 +970,7 @@ void shouldTagsBeInvalidWhenNotAllowed() { Topic topic = Topic.builder() .metadata(ObjectMeta.builder().name("ns-topic1").build()) .spec(Topic.TopicSpec.builder() - .tags(List.of("TAG_TEST")).build()) + .tags(List.of("BAD_TAG")).build()) .build(); List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); From aa10df62b1c5d96d1b50cd43e7eb2fda791a2733 Mon Sep 17 00:00:00 2001 From: E046899 Date: Tue, 3 Oct 2023 15:45:19 +0200 Subject: [PATCH 14/27] Manage Confluent tags --- .../com/michelin/ns4kafka/integration/ConnectTest.java | 5 +++++ .../com/michelin/ns4kafka/integration/TopicTest.java | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java 
b/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java index 681b4ba2..f0da94da 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java @@ -42,6 +42,7 @@ import jakarta.inject.Inject; import java.net.MalformedURLException; import java.net.URL; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -206,6 +207,7 @@ void deployConnectors() throws InterruptedException, MalformedURLException { .spec(Topic.TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -323,6 +325,7 @@ void updateConnectorsWithNullProperty() throws InterruptedException, MalformedUR .spec(Topic.TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -372,6 +375,7 @@ void restartConnector() throws InterruptedException { .spec(Topic.TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -426,6 +430,7 @@ void pauseAndResumeConnector() throws MalformedURLException, InterruptedExceptio .spec(Topic.TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) diff --git a/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java b/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java index 9f09a446..e53dc426 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java @@ -40,6 +40,7 @@ import 
io.micronaut.test.extensions.junit5.annotation.MicronautTest; import jakarta.inject.Inject; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -218,6 +219,7 @@ void createTopic() throws InterruptedException, ExecutionException { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -260,6 +262,7 @@ void updateTopic() throws InterruptedException, ExecutionException { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -284,6 +287,7 @@ void updateTopic() throws InterruptedException, ExecutionException { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "70000"))//This line was changed @@ -329,6 +333,7 @@ void invalidTopicName() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -370,6 +375,7 @@ void updateTopicNoChange() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -387,6 +393,7 @@ void updateTopicNoChange() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "90000")) @@ -415,6 +422,7 @@ void testDeleteRecords() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", 
"delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -477,6 +485,7 @@ void testDeleteRecordsCompactTopic() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "1", "retention.ms", "60000")) From 83d900123ec29533a3a46a1d1a2bc4db2758eb9a Mon Sep 17 00:00:00 2001 From: E046899 Date: Tue, 3 Oct 2023 16:18:16 +0200 Subject: [PATCH 15/27] Manage Confluent tags --- .../michelin/ns4kafka/controllers/topic/TopicController.java | 2 +- .../ns4kafka/services/executors/TopicAsyncExecutorTest.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java index bea5fd6a..63a5df76 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java @@ -109,7 +109,7 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, validationErrors.addAll(topicService.validateTopicUpdate(ns, existingTopic.get(), topic)); } - List existingTags = existingTopic.isPresent() && existingTopic.get().getSpec().getTags() != null + List existingTags = existingTopic.isPresent() ? 
existingTopic.get().getSpec().getTags() : Collections.emptyList(); if (topic.getSpec().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index ef7ab7c7..fe7703a0 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -142,7 +142,7 @@ void deleteTagsShouldDeleteTags() { topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); verify(schemaRegistryClient, times(1)) - .deleteTag(eq(LOCAL_CLUSTER), eq(CLUSTER_ID_TEST + ":" + TOPIC_NAME), eq(TAG1)); + .deleteTag(LOCAL_CLUSTER, CLUSTER_ID_TEST + ":" + TOPIC_NAME, TAG1); } @Test From 55b8c8ed72d8d570a96ee34e5b7bc29134b836e5 Mon Sep 17 00:00:00 2001 From: E046899 Date: Tue, 3 Oct 2023 16:58:52 +0200 Subject: [PATCH 16/27] Manage Confluent tags --- .../executors/TopicAsyncExecutor.java | 54 +++++++++++-------- 1 file changed, 33 insertions(+), 21 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index 9e9c2f83..eca9501c 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -135,7 +135,7 @@ public void synchronizeTopics() { createTopics(toCreate); alterTopics(toUpdate, toCheckConf); - if (managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { + if (isConfluent()) { createTags(ns4kafkaTopics, brokerTopics); deleteTags(ns4kafkaTopics, brokerTopics); } @@ -154,25 +154,23 @@ public void synchronizeTopics() { * @param brokerTopics Topics from broker */ public void createTags(List 
ns4kafkaTopics, Map brokerTopics) { - List tagsToCreate = ns4kafkaTopics.stream().flatMap(ns4kafkaTopic -> { - Topic brokerTopic = brokerTopics.get(ns4kafkaTopic.getMetadata().getName()); - - List existingTags = brokerTopic != null && brokerTopic.getSpec().getTags() != null - ? brokerTopic.getSpec().getTags() - : Collections.emptyList(); - List newTags = ns4kafkaTopic.getSpec().getTags() != null - ? ns4kafkaTopic.getSpec().getTags() - : Collections.emptyList(); - - return newTags.stream().filter(tag -> !existingTags.contains(tag)).map(tag -> TagSpecs.builder() - .entityName( - managedClusterProperties.getConfig().getProperty(CLUSTER_ID) - + ":" - + ns4kafkaTopic.getMetadata().getName()) - .typeName(tag) - .entityType(TOPIC_ENTITY_TYPE) - .build()); - }).toList(); + List tagsToCreate = ns4kafkaTopics + .stream() + .filter(topic -> topic.getMetadata().getGeneration() == 1) + .flatMap(ns4kafkaTopic -> { + Topic brokerTopic = brokerTopics.get(ns4kafkaTopic.getMetadata().getName()); + Set existingTags = brokerTopic != null ? 
new HashSet<>(brokerTopic.getSpec().getTags()) : Collections.emptySet(); + Set newTags = new HashSet<>(ns4kafkaTopic.getSpec().getTags()); + newTags.removeAll(existingTags); + return newTags + .stream() + .map(tag -> TagSpecs.builder() + .entityName(managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + ns4kafkaTopic.getMetadata().getName()) + .typeName(tag) + .entityType(TOPIC_ENTITY_TYPE) + .build()); + }) + .toList(); if (!tagsToCreate.isEmpty()) { String stringTags = String.join(",", tagsToCreate @@ -225,6 +223,16 @@ public void deleteTags(List ns4kafkaTopics, Map brokerTopi managedClusterProperties.getName(), tag.entityName(), tag.typeName()).block()); + + if (!tagsToDelete.isEmpty()) { + tagsToDelete + .forEach(tag -> schemaRegistryClient.deleteTag( + managedClusterProperties.getName(), + tag.entityName(), + tag.typeName()) + .subscribe(success -> log.debug(String.format("Success deleting tag %s.", tag)), + error -> log.error(String.format("Error deleting tag %s.", tag)))); + } } /** @@ -266,7 +274,7 @@ public List listBrokerTopicNames() throws InterruptedException, Executio * @param topics Topics to complete */ public void enrichWithTags(Map topics) { - if (managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { + if (isConfluent()) { topics.forEach((key, value) -> value.getSpec().setTags(schemaRegistryClient.getTopicWithTags(managedClusterProperties.getName(), managedClusterProperties @@ -277,6 +285,10 @@ public void enrichWithTags(Map topics) { } } + public boolean isConfluent() { + return managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); + } + /** * Collect all topics on broker from a list of topic names. 
* From 067141febb17608f8a0c0be1b475bac21e230e19 Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 4 Oct 2023 09:52:14 +0200 Subject: [PATCH 17/27] Manage Confluent tags --- .../executors/TopicAsyncExecutor.java | 33 ++++++++++++------- .../controllers/TopicControllerTest.java | 1 + .../executors/TopicAsyncExecutorTest.java | 10 ++++-- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index eca9501c..2c6aee95 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -135,10 +135,8 @@ public void synchronizeTopics() { createTopics(toCreate); alterTopics(toUpdate, toCheckConf); - if (isConfluent()) { - createTags(ns4kafkaTopics, brokerTopics); - deleteTags(ns4kafkaTopics, brokerTopics); - } + manageTags(ns4kafkaTopics, brokerTopics); + } catch (ExecutionException | TimeoutException | CancellationException | KafkaStoreException e) { log.error("Error", e); } catch (InterruptedException e) { @@ -147,6 +145,19 @@ public void synchronizeTopics() { } } + /** + * Manage tags for creation and deletion. + * + * @param ns4kafkaTopics Topics from ns4kafka + * @param brokerTopics Topics from broker + */ + public void manageTags(List ns4kafkaTopics, Map brokerTopics) { + if (isConfluent()) { + createTags(ns4kafkaTopics, brokerTopics); + deleteTags(ns4kafkaTopics, brokerTopics); + } + } + /** * Create tags. * @@ -159,13 +170,17 @@ public void createTags(List ns4kafkaTopics, Map brokerTopi .filter(topic -> topic.getMetadata().getGeneration() == 1) .flatMap(ns4kafkaTopic -> { Topic brokerTopic = brokerTopics.get(ns4kafkaTopic.getMetadata().getName()); - Set existingTags = brokerTopic != null ? 
new HashSet<>(brokerTopic.getSpec().getTags()) : Collections.emptySet(); + Set existingTags = brokerTopic != null + ? new HashSet<>(brokerTopic.getSpec().getTags()) + : Collections.emptySet(); Set newTags = new HashSet<>(ns4kafkaTopic.getSpec().getTags()); newTags.removeAll(existingTags); return newTags .stream() .map(tag -> TagSpecs.builder() - .entityName(managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + ns4kafkaTopic.getMetadata().getName()) + .entityName( + managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + + ":" + ns4kafkaTopic.getMetadata().getName()) .typeName(tag) .entityType(TOPIC_ENTITY_TYPE) .build()); @@ -218,12 +233,6 @@ public void deleteTags(List ns4kafkaTopics, Map brokerTopi .build()); }).toList(); - tagsToDelete.forEach(tag -> - schemaRegistryClient.deleteTag( - managedClusterProperties.getName(), - tag.entityName(), - tag.typeName()).block()); - if (!tagsToDelete.isEmpty()) { tagsToDelete .forEach(tag -> schemaRegistryClient.deleteTag( diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index c07d3f0e..10a80192 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -383,6 +383,7 @@ void updateTopicWithNewTags() throws InterruptedException, ExecutionException, T .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) + .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index fe7703a0..a95907fd 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ 
b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -53,6 +53,7 @@ void createTagsShouldAddTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); + managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); when(schemaRegistryClient.addTags(anyString(), anyList())).thenReturn(Mono.just(new ArrayList<>())); when(managedClusterProperties.getConfig()).thenReturn(properties); @@ -61,7 +62,7 @@ void createTagsShouldAddTags() { List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) + .name(TOPIC_NAME).generation(1).build()) .spec(Topic.TopicSpec.builder() .tags(List.of(TAG1)).build()).build(); ns4kafkaTopics.add(ns4kafkaTopic); @@ -74,7 +75,7 @@ void createTagsShouldAddTags() { .tags(List.of(TAG2)).build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); - topicAsyncExecutor.createTags(ns4kafkaTopics, brokerTopics); + topicAsyncExecutor.manageTags(ns4kafkaTopics, brokerTopics); List tagSpecsList = new ArrayList<>(); TagSpecs tagSpecs = TagSpecs.builder().typeName(TAG1) @@ -90,6 +91,7 @@ void createTagsShouldNotAddTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); + managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -117,6 +119,7 @@ void deleteTagsShouldDeleteTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); + managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); when(schemaRegistryClient.deleteTag(anyString(), anyString(), anyString())) .thenReturn(Mono.just(new 
HttpResponseMock())); @@ -150,6 +153,7 @@ void deleteTagsShouldNotDeleteTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); + managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -177,6 +181,7 @@ void completeWithTagsShouldComplete() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); + managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); TagTopicInfo tagTopicInfo = TagTopicInfo.builder().typeName(TAG1).build(); @@ -203,6 +208,7 @@ void completeWithTagsShouldNotComplete() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); + managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.SELF_MANAGED); From a41b35e3d6991d942dbbcf45d12f4f346c92c963 Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 4 Oct 2023 13:29:59 +0200 Subject: [PATCH 18/27] Manage Confluent tags --- .../services/executors/TopicAsyncExecutorTest.java | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index a95907fd..ec8baa93 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -19,6 +19,7 @@ import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; import 
com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -53,11 +54,11 @@ void createTagsShouldAddTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); - managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); when(schemaRegistryClient.addTags(anyString(), anyList())).thenReturn(Mono.just(new ArrayList<>())); when(managedClusterProperties.getConfig()).thenReturn(properties); when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -72,7 +73,7 @@ void createTagsShouldAddTags() { .metadata(ObjectMeta.builder() .name(TOPIC_NAME).build()) .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG2)).build()).build(); + .tags(Collections.emptyList()).build()).build(); brokerTopics.put(TOPIC_NAME, brokerTopic); topicAsyncExecutor.manageTags(ns4kafkaTopics, brokerTopics); @@ -91,7 +92,6 @@ void createTagsShouldNotAddTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); - managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -119,7 +119,6 @@ void deleteTagsShouldDeleteTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); - managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); when(schemaRegistryClient.deleteTag(anyString(), anyString(), anyString())) 
.thenReturn(Mono.just(new HttpResponseMock())); @@ -153,7 +152,6 @@ void deleteTagsShouldNotDeleteTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); - managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); List ns4kafkaTopics = new ArrayList<>(); Topic ns4kafkaTopic = Topic.builder() @@ -181,7 +179,6 @@ void completeWithTagsShouldComplete() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); - managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); TagTopicInfo tagTopicInfo = TagTopicInfo.builder().typeName(TAG1).build(); @@ -208,7 +205,6 @@ void completeWithTagsShouldNotComplete() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); managedClusterProperties.setConfig(properties); - managedClusterProperties.setProvider(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.SELF_MANAGED); From c3207436ec05fb41aef2533fe0e1f29ba1c62967 Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 4 Oct 2023 14:40:06 +0200 Subject: [PATCH 19/27] Manage Confluent tags --- .../schema/SchemaRegistryClientTest.java | 130 ++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java diff --git a/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java b/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java new file mode 100644 index 00000000..46e445f0 --- /dev/null +++ b/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java @@ -0,0 +1,130 @@ +package com.michelin.ns4kafka.services.clients.schema; + 
+import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.TOPIC_ENTITY_TYPE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.when; + +import com.michelin.ns4kafka.properties.ManagedClusterProperties; +import com.michelin.ns4kafka.services.clients.schema.entities.TagInfo; +import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; +import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; +import io.micronaut.core.type.Argument; +import io.micronaut.http.HttpRequest; +import io.micronaut.http.HttpResponse; +import io.micronaut.http.client.HttpClient; +import java.util.List; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import reactor.core.publisher.Mono; + +/** + * Test schema registry. 
+ */ +@ExtendWith(MockitoExtension.class) +public class SchemaRegistryClientTest { + + private static final String KAFKA_CLUSTER = "local"; + private static final String TAG = "TAG"; + private static final String ENTITY_NAME = "ENTITY_NAME"; + + @Mock + HttpClient httpClient; + + @Mock + List managedClusterPropertiesList; + + @InjectMocks + SchemaRegistryClient schemaRegistryClient; + + static ManagedClusterProperties.SchemaRegistryProperties schemaRegistryProperties; + + static ManagedClusterProperties managedClusterProperties; + + @BeforeAll + static void init() { + schemaRegistryProperties = new ManagedClusterProperties.SchemaRegistryProperties(); + schemaRegistryProperties.setUrl("URL"); + schemaRegistryProperties.setBasicAuthUsername("USER"); + schemaRegistryProperties.setBasicAuthPassword("PASSWORD"); + + managedClusterProperties = + new ManagedClusterProperties(KAFKA_CLUSTER, ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); + managedClusterProperties.setSchemaRegistry(schemaRegistryProperties); + } + + @Test + void getTagsTest() { + TagInfo tagInfo = TagInfo.builder().name(TAG).build(); + + when(managedClusterPropertiesList.stream()).thenReturn( + Stream.of(managedClusterProperties)); + when(httpClient.retrieve((HttpRequest) any(), (Argument) any())) + .thenReturn(Mono.just(List.of(tagInfo))); + + Mono> tagInfos = schemaRegistryClient.getTags(KAFKA_CLUSTER); + + assertEquals(TAG, tagInfos.block().get(0).name()); + } + + @Test + void getTopicWithTagsTest() { + TagTopicInfo tagTopicInfo = TagTopicInfo.builder() + .entityType(TOPIC_ENTITY_TYPE) + .typeName(TAG) + .entityName(ENTITY_NAME).build(); + + when(managedClusterPropertiesList.stream()).thenReturn( + Stream.of(managedClusterProperties)); + when(httpClient.retrieve((HttpRequest) any(), (Argument) any())) + .thenReturn(Mono.just(List.of(tagTopicInfo))); + + Mono> tagInfos = schemaRegistryClient.getTopicWithTags(KAFKA_CLUSTER, ENTITY_NAME); + + assertEquals(ENTITY_NAME, 
tagInfos.block().get(0).entityName()); + assertEquals(TAG, tagInfos.block().get(0).typeName()); + assertEquals(TOPIC_ENTITY_TYPE, tagInfos.block().get(0).entityType()); + } + + @Test + void addTagsTest() { + TagTopicInfo tagTopicInfo = TagTopicInfo.builder() + .entityType(TOPIC_ENTITY_TYPE) + .entityName(ENTITY_NAME) + .typeName(TAG).build(); + + TagSpecs tagSpecs = TagSpecs.builder() + .entityType(TOPIC_ENTITY_TYPE) + .entityName(ENTITY_NAME) + .typeName(TAG).build(); + + when(managedClusterPropertiesList.stream()).thenReturn( + Stream.of(managedClusterProperties)); + when(httpClient.retrieve((HttpRequest) any(), (Argument) any())) + .thenReturn(Mono.just(List.of(tagTopicInfo))); + + Mono> tagInfos = schemaRegistryClient.addTags(KAFKA_CLUSTER, List.of(tagSpecs)); + + assertEquals(ENTITY_NAME, tagInfos.block().get(0).entityName()); + assertEquals(TAG, tagInfos.block().get(0).typeName()); + assertEquals(TOPIC_ENTITY_TYPE, tagInfos.block().get(0).entityType()); + } + + @Test + void deleteTagTest() { + when(managedClusterPropertiesList.stream()).thenReturn( + Stream.of(managedClusterProperties)); + when(httpClient.exchange((HttpRequest) any(), (Class) any())) + .thenReturn(Mono.just(HttpResponse.accepted())); + + Mono> deleteInfo = schemaRegistryClient.deleteTag(KAFKA_CLUSTER, ENTITY_NAME, TAG); + + assertNotNull(deleteInfo); + } +} From dd3c6618037dc5ba23593656b83560b2c0ae6d51 Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 4 Oct 2023 14:59:00 +0200 Subject: [PATCH 20/27] Manage Confluent tags --- .../services/clients/schema/SchemaRegistryClientTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java b/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java index 46e445f0..5c6e937f 100644 --- a/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java +++ 
b/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java @@ -28,7 +28,7 @@ * Test schema registry. */ @ExtendWith(MockitoExtension.class) -public class SchemaRegistryClientTest { +class SchemaRegistryClientTest { private static final String KAFKA_CLUSTER = "local"; private static final String TAG = "TAG"; From 509265dd511ac6f81b24b80a2f180f6e89226dd6 Mon Sep 17 00:00:00 2001 From: E046899 Date: Wed, 4 Oct 2023 17:04:12 +0200 Subject: [PATCH 21/27] Manage Confluent tags --- .../services/clients/schema/SchemaRegistryClient.java | 11 ++++++++--- .../services/executors/TopicAsyncExecutor.java | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java index 41a4e20a..56ce74ed 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java @@ -36,6 +36,8 @@ public class SchemaRegistryClient { private static final String SUBJECTS = "/subjects/"; private static final String CONFIG = "/config/"; + private static final String ACCEPT_HEADER = "ACCEPT"; + private static final String JSON_HEADER = "application/json"; @Inject @Client(id = "schema-registry") @@ -189,7 +191,8 @@ public Mono> getTags(String kafkaCluster) { URI.create(StringUtils.prependUri( config.getUrl(), "/catalog/v1/types/tagdefs"))) - .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()) + .header(ACCEPT_HEADER, JSON_HEADER); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagInfo.class))); } @@ -207,7 +210,8 @@ public Mono> getTopicWithTags(String kafkaCluster, String ent URI.create(StringUtils.prependUri( config.getUrl(), 
"/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags"))) - .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()) + .header(ACCEPT_HEADER, JSON_HEADER); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); } @@ -225,7 +229,8 @@ public Mono> addTags(String kafkaCluster, List tagS URI.create(StringUtils.prependUri( config.getUrl(), "/catalog/v1/entity/tags")), tagSpecs) - .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()) + .header(ACCEPT_HEADER, JSON_HEADER); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index 2c6aee95..c00827d4 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -167,7 +167,7 @@ public void manageTags(List ns4kafkaTopics, Map brokerTopi public void createTags(List ns4kafkaTopics, Map brokerTopics) { List tagsToCreate = ns4kafkaTopics .stream() - .filter(topic -> topic.getMetadata().getGeneration() == 1) + .filter(ns4kafkaTopic -> brokerTopics.get(ns4kafkaTopic.getMetadata().getName()) != null) .flatMap(ns4kafkaTopic -> { Topic brokerTopic = brokerTopics.get(ns4kafkaTopic.getMetadata().getName()); Set existingTags = brokerTopic != null From 6d4f680f42131d3b9e86f3be71f53f3bb39ffdb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Greffier?= Date: Sun, 8 Oct 2023 01:36:39 +0200 Subject: [PATCH 22/27] Update tags creation and deletion --- README.md | 2 +- .../controllers/topic/TopicController.java | 9 +- .../com/michelin/ns4kafka/models/Topic.java | 6 +- 
.../ns4kafka/services/TopicService.java | 24 +- .../clients/schema/SchemaRegistryClient.java | 53 ++- .../clients/schema/entities/TagSpecs.java | 20 -- .../clients/schema/entities/TagTopicInfo.java | 7 +- .../AccessControlEntryAsyncExecutor.java | 19 +- .../executors/ConnectorAsyncExecutor.java | 19 +- .../executors/ConsumerGroupAsyncExecutor.java | 6 +- .../KafkaAsyncExecutorScheduler.java | 4 + .../executors/TopicAsyncExecutor.java | 319 +++++++++--------- .../services/executors/UserAsyncExecutor.java | 11 +- .../controllers/TopicControllerTest.java | 183 ++++------ .../ns4kafka/integration/ConnectTest.java | 5 - .../ns4kafka/integration/TopicTest.java | 9 - .../ns4kafka/services/TopicServiceTest.java | 92 ++--- .../schema/SchemaRegistryClientTest.java | 130 ------- .../services/executors/HttpResponseMock.java | 42 --- .../executors/TagSpecsArgumentMatcher.java | 27 -- .../executors/TopicAsyncExecutorTest.java | 200 +++-------- 21 files changed, 405 insertions(+), 782 deletions(-) delete mode 100644 src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java delete mode 100644 src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java delete mode 100644 src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java delete mode 100644 src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java diff --git a/README.md b/README.md index 0ed01447..b0b413cb 100644 --- a/README.md +++ b/README.md @@ -212,7 +212,7 @@ of your namespace descriptors. | drop-unsync-acls | boolean | Should Ns4Kafka drop unsynchronized ACLs | | provider | boolean | The kind of cluster. Either SELF_MANAGED or CONFLUENT_CLOUD | | config.bootstrap.servers | string | The location of the clusters servers | -| config.cluster.id | string | The confluent cloud cluster id to manage tags | +| config.cluster.id | string | The cluster id. Required to use Confluent Cloud tags. 
| | schema-registry.url | string | The location of the Schema Registry | | schema-registry.basicAuthUsername | string | Basic authentication username to the Schema Registry | | schema-registry.basicAuthPassword | string | Basic authentication password to the Schema Registry | diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java index 63a5df76..89d63d66 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java @@ -29,7 +29,6 @@ import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; import org.apache.kafka.common.TopicPartition; /** @@ -109,9 +108,9 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, validationErrors.addAll(topicService.validateTopicUpdate(ns, existingTopic.get(), topic)); } - List existingTags = existingTopic.isPresent() - ? 
existingTopic.get().getSpec().getTags() - : Collections.emptyList(); + List existingTags = existingTopic + .map(oldTopic -> oldTopic.getSpec().getTags()) + .orElse(Collections.emptyList()); if (topic.getSpec().getTags().stream().anyMatch(newTag -> !existingTags.contains(newTag))) { validationErrors.addAll(topicService.validateTags(ns, topic)); } @@ -276,6 +275,6 @@ public List deleteRecords(String namespace, String topic, .offset(entry.getValue()) .build()) .build()) - .collect(Collectors.toList()); + .toList(); } } diff --git a/src/main/java/com/michelin/ns4kafka/models/Topic.java b/src/main/java/com/michelin/ns4kafka/models/Topic.java index 333daf90..f3856741 100644 --- a/src/main/java/com/michelin/ns4kafka/models/Topic.java +++ b/src/main/java/com/michelin/ns4kafka/models/Topic.java @@ -8,6 +8,7 @@ import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; import java.time.Instant; +import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; @@ -35,6 +36,7 @@ public class Topic { @NotNull private ObjectMeta metadata; + @Valid @NotNull private TopicSpec spec; @@ -55,13 +57,15 @@ public enum TopicPhase { */ @Data @Builder + @Introspected @NoArgsConstructor @AllArgsConstructor public static class TopicSpec { private int replicationFactor; private int partitions; + @Builder.Default @JsonSetter(nulls = Nulls.AS_EMPTY) - private List tags; + private List tags = new ArrayList<>(); private Map configs; } diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index d28ab3f5..ac262e9b 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -329,39 +329,39 @@ public Map deleteRecords(Topic topic, Map validateTags(Namespace namespace, Topic topic) { List validationErrors = new ArrayList<>(); Optional topicCluster = managedClusterProperties - 
.stream() - .filter(cluster -> namespace.getMetadata().getCluster().equals(cluster.getName())) - .findFirst(); + .stream() + .filter(cluster -> namespace.getMetadata().getCluster().equals(cluster.getName())) + .findFirst(); if (topicCluster.isPresent() - && !topicCluster.get().getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { + && !topicCluster.get().getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD)) { validationErrors.add(String.format( - "Invalid value (%s) for tags: Tags are not currently supported.", - String.join(",", topic.getSpec().getTags()))); + "Invalid value %s for tags: Tags are not currently supported.", + String.join(", ", topic.getSpec().getTags()))); return validationErrors; } Set tagNames = schemaRegistryClient.getTags(namespace.getMetadata().getCluster()) - .map(tags -> tags.stream().map(TagInfo::name).collect(Collectors.toSet())).block(); + .map(tags -> tags.stream().map(TagInfo::name).collect(Collectors.toSet())).block(); if (tagNames == null || tagNames.isEmpty()) { validationErrors.add(String.format( - "Invalid value (%s) for tags: No tags allowed.", - String.join(",", topic.getSpec().getTags()))); + "Invalid value %s for tags: No tags allowed.", + String.join(", ", topic.getSpec().getTags()))); return validationErrors; } if (!tagNames.containsAll(topic.getSpec().getTags())) { validationErrors.add(String.format( - "Invalid value (%s) for tags: Available tags are (%s).", - String.join(",", topic.getSpec().getTags()), String.join(",", tagNames))); + "Invalid value %s for tags: Available tags are %s.", + String.join(", ", topic.getSpec().getTags()), String.join(",", tagNames))); } return validationErrors; diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java index 56ce74ed..a7093c29 100644 --- 
a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java @@ -7,7 +7,6 @@ import com.michelin.ns4kafka.services.clients.schema.entities.SchemaRequest; import com.michelin.ns4kafka.services.clients.schema.entities.SchemaResponse; import com.michelin.ns4kafka.services.clients.schema.entities.TagInfo; -import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.core.type.Argument; @@ -36,8 +35,6 @@ public class SchemaRegistryClient { private static final String SUBJECTS = "/subjects/"; private static final String CONFIG = "/config/"; - private static final String ACCEPT_HEADER = "ACCEPT"; - private static final String JSON_HEADER = "application/json"; @Inject @Client(id = "schema-registry") @@ -187,12 +184,9 @@ public Mono deleteCurrentCompatibilityBySubject(Str public Mono> getTags(String kafkaCluster) { ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); HttpRequest request = HttpRequest - .GET( - URI.create(StringUtils.prependUri( - config.getUrl(), - "/catalog/v1/types/tagdefs"))) - .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()) - .header(ACCEPT_HEADER, JSON_HEADER); + .GET(URI.create(StringUtils.prependUri( + config.getUrl(), "/catalog/v1/types/tagdefs"))) + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagInfo.class))); } @@ -200,18 +194,16 @@ public Mono> getTags(String kafkaCluster) { * List tags of a topic. 
* * @param kafkaCluster The Kafka cluster - * @param entityName The topic's name for the API + * @param entityName The topic's name for the API * @return A list of tags */ public Mono> getTopicWithTags(String kafkaCluster, String entityName) { ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); HttpRequest request = HttpRequest - .GET( - URI.create(StringUtils.prependUri( - config.getUrl(), - "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags"))) - .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()) - .header(ACCEPT_HEADER, JSON_HEADER); + .GET(URI.create(StringUtils.prependUri( + config.getUrl(), + "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags"))) + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); } @@ -219,18 +211,16 @@ public Mono> getTopicWithTags(String kafkaCluster, String ent * Add a tag to a topic. 
* * @param kafkaCluster The Kafka cluster - * @param tagSpecs Tags to add + * @param tagSpecs Tags to add * @return Information about added tags */ - public Mono> addTags(String kafkaCluster, List tagSpecs) { + public Mono> addTags(String kafkaCluster, List tagSpecs) { ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); HttpRequest request = HttpRequest - .POST( - URI.create(StringUtils.prependUri( - config.getUrl(), - "/catalog/v1/entity/tags")), tagSpecs) - .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()) - .header(ACCEPT_HEADER, JSON_HEADER); + .POST(URI.create(StringUtils.prependUri( + config.getUrl(), + "/catalog/v1/entity/tags")), tagSpecs) + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(TagTopicInfo.class))); } @@ -238,18 +228,17 @@ public Mono> addTags(String kafkaCluster, List tagS * Delete a tag to a topic. * * @param kafkaCluster The Kafka cluster - * @param entityName The topic's name - * @param tagName The tag to delete + * @param entityName The topic's name + * @param tagName The tag to delete * @return The resume response */ - public Mono> deleteTag(String kafkaCluster, String entityName, String tagName) { + public Mono> deleteTag(String kafkaCluster, String entityName, String tagName) { ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster); HttpRequest request = HttpRequest - .DELETE( - URI.create(StringUtils.prependUri( - config.getUrl(), - "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags/" + tagName))) - .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); + .DELETE(URI.create(StringUtils.prependUri( + config.getUrl(), + "/catalog/v1/entity/type/kafka_topic/name/" + entityName + "/tags/" + tagName))) + .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword()); return Mono.from(httpClient.exchange(request, 
Void.class)); } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java deleted file mode 100644 index 42bcc36b..00000000 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagSpecs.java +++ /dev/null @@ -1,20 +0,0 @@ -package com.michelin.ns4kafka.services.clients.schema.entities; - -import lombok.Builder; - -/** - * Tag Specs to call schema registry API. - * - * @param entityName The entity name - * @param entityType The entity type - * @param typeName The type name - */ -@Builder -public record TagSpecs(String entityName, String entityType, String typeName) { - - @Override - public String toString() { - return entityName + "/" + typeName; - } - -} diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java index fd5666ae..891b7f6b 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/TagTopicInfo.java @@ -5,9 +5,9 @@ /** * Information on tag. 
* - * @param entityName The entity name - * @param entityType The entity type - * @param typeName The type name + * @param entityName The entity name + * @param entityType The entity type + * @param typeName The type name * @param entityStatus The entity status */ @Builder @@ -17,5 +17,4 @@ public record TagTopicInfo(String entityName, String entityType, String typeName public String toString() { return entityName + "/" + typeName; } - } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java index d22ef3ca..a032703f 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java @@ -15,7 +15,6 @@ import com.michelin.ns4kafka.services.ConnectorService; import com.michelin.ns4kafka.services.StreamService; import io.micronaut.context.annotation.EachBean; -import jakarta.inject.Inject; import jakarta.inject.Singleton; import java.util.ArrayList; import java.util.List; @@ -24,6 +23,7 @@ import java.util.concurrent.TimeoutException; import java.util.function.Function; import java.util.stream.Stream; +import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.common.acl.AclBinding; @@ -40,26 +40,19 @@ @Slf4j @EachBean(ManagedClusterProperties.class) @Singleton +@AllArgsConstructor public class AccessControlEntryAsyncExecutor { private static final String USER_PRINCIPAL = "User:"; private final ManagedClusterProperties managedClusterProperties; - @Inject - AccessControlEntryService accessControlEntryService; + private AccessControlEntryService accessControlEntryService; - @Inject - StreamService streamService; + private StreamService streamService; - @Inject - ConnectorService connectorService; + private 
ConnectorService connectorService; - @Inject - NamespaceRepository namespaceRepository; - - public AccessControlEntryAsyncExecutor(ManagedClusterProperties managedClusterProperties) { - this.managedClusterProperties = managedClusterProperties; - } + private NamespaceRepository namespaceRepository; /** * Run the ACLs synchronization. diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java index 09464a1c..3a06111e 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java @@ -12,13 +12,13 @@ import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorStatus; import io.micronaut.context.annotation.EachBean; import io.micronaut.http.client.exceptions.HttpClientResponseException; -import jakarta.inject.Inject; import jakarta.inject.Singleton; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Stream; +import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -29,20 +29,18 @@ @Slf4j @EachBean(ManagedClusterProperties.class) @Singleton +@AllArgsConstructor public class ConnectorAsyncExecutor { - private final ManagedClusterProperties managedClusterProperties; private final Set healthyConnectClusters = new HashSet<>(); private final Set idleConnectClusters = new HashSet<>(); - @Inject + + private final ManagedClusterProperties managedClusterProperties; + private ConnectorRepository connectorRepository; - @Inject + private KafkaConnectClient kafkaConnectClient; - @Inject - private ConnectClusterService connectClusterService; - public ConnectorAsyncExecutor(ManagedClusterProperties managedClusterProperties) { - this.managedClusterProperties = managedClusterProperties; 
- } + private ConnectClusterService connectClusterService; /** * Run the connector synchronization. @@ -101,7 +99,6 @@ private Flux checkConnectClusterHealth() { }); } - /** * For each connect cluster, start the synchronization of connectors. */ @@ -141,7 +138,7 @@ private Flux synchronizeConnectCluster(String connectCluster) { managedClusterProperties.getName(), connectCluster); } else { log.error( - "Exception during connectors synchronization for Kafka cluster {} and Kafka Connect {}: {}.", + "Error during connectors synchronization for Kafka cluster {} and Kafka Connect {}: {}.", managedClusterProperties.getName(), connectCluster, error.getMessage()); } }) diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java index 04b2e41f..372728e8 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java @@ -10,6 +10,7 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; import java.util.stream.Collectors; +import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.ConsumerGroupDescription; @@ -23,13 +24,10 @@ @Slf4j @EachBean(ManagedClusterProperties.class) @Singleton +@AllArgsConstructor public class ConsumerGroupAsyncExecutor { private final ManagedClusterProperties managedClusterProperties; - public ConsumerGroupAsyncExecutor(ManagedClusterProperties managedClusterProperties) { - this.managedClusterProperties = managedClusterProperties; - } - /** * Getter for Kafka Admin client. 
* diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorScheduler.java b/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorScheduler.java index c387d963..233e57d8 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorScheduler.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorScheduler.java @@ -18,12 +18,16 @@ @Singleton public class KafkaAsyncExecutorScheduler { private final AtomicBoolean ready = new AtomicBoolean(false); + @Inject List topicAsyncExecutors; + @Inject List accessControlEntryAsyncExecutors; + @Inject List connectorAsyncExecutors; + @Inject List userAsyncExecutors; diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index c00827d4..3fc416b5 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -6,10 +6,8 @@ import com.michelin.ns4kafka.repositories.TopicRepository; import com.michelin.ns4kafka.repositories.kafka.KafkaStoreException; import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; -import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; import io.micronaut.context.annotation.EachBean; -import jakarta.inject.Inject; import jakarta.inject.Singleton; import java.time.Instant; import java.util.ArrayList; @@ -20,7 +18,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; @@ -51,21 +48,14 @@ @Singleton @AllArgsConstructor public class TopicAsyncExecutor { - public static final String CLUSTER_ID = "cluster.id"; 
public static final String TOPIC_ENTITY_TYPE = "kafka_topic"; private final ManagedClusterProperties managedClusterProperties; - @Inject - TopicRepository topicRepository; - - @Inject - SchemaRegistryClient schemaRegistryClient; + private TopicRepository topicRepository; - public TopicAsyncExecutor(ManagedClusterProperties managedClusterProperties) { - this.managedClusterProperties = managedClusterProperties; - } + private SchemaRegistryClient schemaRegistryClient; private Admin getAdminClient() { return managedClusterProperties.getAdminClient(); @@ -90,157 +80,97 @@ public void synchronizeTopics() { Map brokerTopics = collectBrokerTopics(); List ns4kafkaTopics = topicRepository.findAllForCluster(managedClusterProperties.getName()); - List toCreate = ns4kafkaTopics.stream() - .filter(topic -> !brokerTopics.containsKey(topic.getMetadata().getName())) - .toList(); - - List toCheckConf = ns4kafkaTopics.stream() - .filter(topic -> brokerTopics.containsKey(topic.getMetadata().getName())) - .toList(); - - Map> toUpdate = toCheckConf.stream() - .map(topic -> { - Map actualConf = - brokerTopics.get(topic.getMetadata().getName()).getSpec().getConfigs(); - Map expectedConf = - topic.getSpec().getConfigs() == null ? 
Map.of() : topic.getSpec().getConfigs(); - Collection topicConfigChanges = computeConfigChanges(expectedConf, actualConf); - if (!topicConfigChanges.isEmpty()) { - ConfigResource cr = - new ConfigResource(ConfigResource.Type.TOPIC, topic.getMetadata().getName()); - return Map.entry(cr, topicConfigChanges); - } - return null; - }) - .filter(Objects::nonNull) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - if (!toCreate.isEmpty()) { + List createTopics = ns4kafkaTopics.stream() + .filter(topic -> !brokerTopics.containsKey(topic.getMetadata().getName())) + .toList(); + + List checkTopics = ns4kafkaTopics.stream() + .filter(topic -> brokerTopics.containsKey(topic.getMetadata().getName())) + .toList(); + + Map> updateTopics = checkTopics.stream() + .map(topic -> { + Map actualConf = + brokerTopics.get(topic.getMetadata().getName()).getSpec().getConfigs(); + Map expectedConf = + topic.getSpec().getConfigs() == null ? Map.of() : topic.getSpec().getConfigs(); + Collection topicConfigChanges = computeConfigChanges(expectedConf, actualConf); + if (!topicConfigChanges.isEmpty()) { + ConfigResource cr = + new ConfigResource(ConfigResource.Type.TOPIC, topic.getMetadata().getName()); + return Map.entry(cr, topicConfigChanges); + } + return null; + }) + .filter(Objects::nonNull) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + if (!createTopics.isEmpty()) { log.debug("Topic(s) to create: " - + String.join(",", toCreate.stream().map(topic -> topic.getMetadata().getName()).toList())); + + String.join(", ", createTopics.stream().map(topic -> topic.getMetadata().getName()).toList())); } - if (!toUpdate.isEmpty()) { + if (!updateTopics.isEmpty()) { log.debug("Topic(s) to update: " - + String.join(",", toUpdate.keySet().stream().map(ConfigResource::name).toList())); - for (Map.Entry> e : toUpdate.entrySet()) { + + String.join(", ", updateTopics.keySet().stream().map(ConfigResource::name).toList())); + for (Map.Entry> e : 
updateTopics.entrySet()) { for (AlterConfigOp op : e.getValue()) { log.debug( - e.getKey().name() + " " + op.opType().toString() + " " + op.configEntry().name() + "(" - + op.configEntry().value() + ")"); + e.getKey().name() + " " + op.opType().toString() + " " + op.configEntry().name() + "(" + + op.configEntry().value() + ")"); } } } - createTopics(toCreate); - alterTopics(toUpdate, toCheckConf); - - manageTags(ns4kafkaTopics, brokerTopics); + createTopics(createTopics); + alterTopics(updateTopics, checkTopics); + if (isConfluentCloud()) { + alterTags(checkTopics, brokerTopics); + } } catch (ExecutionException | TimeoutException | CancellationException | KafkaStoreException e) { - log.error("Error", e); + log.error("An error occurred during the topic synchronization", e); } catch (InterruptedException e) { - log.error("Error", e); + log.error("Thread interrupted during the topic synchronization", e); Thread.currentThread().interrupt(); } } /** - * Manage tags for creation and deletion. + * Alter tags. * * @param ns4kafkaTopics Topics from ns4kafka - * @param brokerTopics Topics from broker + * @param brokerTopics Topics from broker */ - public void manageTags(List ns4kafkaTopics, Map brokerTopics) { - if (isConfluent()) { - createTags(ns4kafkaTopics, brokerTopics); - deleteTags(ns4kafkaTopics, brokerTopics); - } - } + public void alterTags(List ns4kafkaTopics, Map brokerTopics) { + List tagsToCreate = ns4kafkaTopics + .stream() + .flatMap(topic -> { + Topic brokerTopic = brokerTopics.get(topic.getMetadata().getName()); - /** - * Create tags. 
- * - * @param ns4kafkaTopics Topics from ns4kafka - * @param brokerTopics Topics from broker - */ - public void createTags(List ns4kafkaTopics, Map brokerTopics) { - List tagsToCreate = ns4kafkaTopics - .stream() - .filter(ns4kafkaTopic -> brokerTopics.get(ns4kafkaTopic.getMetadata().getName()) != null) - .flatMap(ns4kafkaTopic -> { - Topic brokerTopic = brokerTopics.get(ns4kafkaTopic.getMetadata().getName()); - Set existingTags = brokerTopic != null - ? new HashSet<>(brokerTopic.getSpec().getTags()) - : Collections.emptySet(); - Set newTags = new HashSet<>(ns4kafkaTopic.getSpec().getTags()); - newTags.removeAll(existingTags); - return newTags - .stream() - .map(tag -> TagSpecs.builder() - .entityName( - managedClusterProperties.getConfig().getProperty(CLUSTER_ID) - + ":" + ns4kafkaTopic.getMetadata().getName()) - .typeName(tag) - .entityType(TOPIC_ENTITY_TYPE) - .build()); - }) - .toList(); + // Get tags to delete + Set existingTags = new HashSet<>(brokerTopic.getSpec().getTags()); + existingTags.removeAll(Set.copyOf(topic.getSpec().getTags())); + deleteTags(existingTags, topic.getMetadata().getName()); - if (!tagsToCreate.isEmpty()) { - String stringTags = String.join(",", tagsToCreate + // Get tags to create + Set newTags = new HashSet<>(topic.getSpec().getTags()); + newTags.removeAll(Set.copyOf(brokerTopic.getSpec().getTags())); + + return newTags .stream() - .map(Record::toString) - .toList()); - schemaRegistryClient.addTags(managedClusterProperties.getName(), tagsToCreate) - .subscribe(success -> log.debug(String.format("Success creating tag %s.", stringTags)), - error -> log.error(String.format("Error creating tag %s.", stringTags))); - } - } + .map(tag -> TagTopicInfo.builder() + .entityName(managedClusterProperties + .getConfig() + .getProperty(CLUSTER_ID) + ":" + topic.getMetadata().getName()) + .typeName(tag) + .entityType(TOPIC_ENTITY_TYPE) + .build()); + }) + .toList(); - /** - * Delete tags. 
- * - * @param ns4kafkaTopics Topics from ns4kafka - * @param brokerTopics Topics from broker - */ - public void deleteTags(List ns4kafkaTopics, Map brokerTopics) { - List tagsToDelete = brokerTopics - .values() - .stream() - .flatMap(brokerTopic -> { - Optional newTopic = ns4kafkaTopics - .stream() - .filter(ns4kafkaTopic -> ns4kafkaTopic - .getMetadata() - .getName() - .equals(brokerTopic.getMetadata().getName())) - .findFirst(); - - Set existingTags = new HashSet<>(brokerTopic.getSpec().getTags()); - Set newTags = newTopic.isPresent() - ? new HashSet<>(newTopic.get().getSpec().getTags()) - : Collections.emptySet(); - existingTags.removeAll(newTags); - return existingTags - .stream() - .map(tag -> TagTopicInfo.builder() - .entityName(managedClusterProperties - .getConfig() - .getProperty(CLUSTER_ID) + ":" + brokerTopic.getMetadata().getName()) - .typeName(tag) - .entityType(TOPIC_ENTITY_TYPE) - .build()); - }).toList(); - - if (!tagsToDelete.isEmpty()) { - tagsToDelete - .forEach(tag -> schemaRegistryClient.deleteTag( - managedClusterProperties.getName(), - tag.entityName(), - tag.typeName()) - .subscribe(success -> log.debug(String.format("Success deleting tag %s.", tag)), - error -> log.error(String.format("Error deleting tag %s.", tag)))); + if (!tagsToCreate.isEmpty()) { + createTags(tagsToCreate); } } @@ -250,9 +180,16 @@ public void deleteTags(List ns4kafkaTopics, Map brokerTopi * @param topic The topic to delete */ public void deleteTopic(Topic topic) throws InterruptedException, ExecutionException, TimeoutException { - getAdminClient().deleteTopics(List.of(topic.getMetadata().getName())).all().get(30, TimeUnit.SECONDS); + getAdminClient().deleteTopics(List.of(topic.getMetadata().getName())) + .all() + .get(30, TimeUnit.SECONDS); + log.info("Success deleting topic {} on {}", topic.getMetadata().getName(), - this.managedClusterProperties.getName()); + managedClusterProperties.getName()); + + if (isConfluentCloud() && !topic.getSpec().getTags().isEmpty()) { 
+ deleteTags(topic.getSpec().getTags(), topic.getMetadata().getName()); + } } /** @@ -283,18 +220,24 @@ public List listBrokerTopicNames() throws InterruptedException, Executio * @param topics Topics to complete */ public void enrichWithTags(Map topics) { - if (isConfluent()) { - topics.forEach((key, value) -> - value.getSpec().setTags(schemaRegistryClient.getTopicWithTags(managedClusterProperties.getName(), - managedClusterProperties - .getConfig() - .getProperty(CLUSTER_ID) - + ":" + value.getMetadata().getName()) - .block().stream().map(TagTopicInfo::typeName).toList())); + if (isConfluentCloud()) { + topics.forEach((key, value) -> { + List tags = schemaRegistryClient.getTopicWithTags(managedClusterProperties.getName(), + managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + + ":" + value.getMetadata().getName()).block(); + + value.getSpec().setTags(tags != null ? tags.stream().map(TagTopicInfo::typeName).toList() : + Collections.emptyList()); + }); } } - public boolean isConfluent() { + /** + * Check if the current cluster is Confluent Cloud. + * + * @return true if it is, false otherwise + */ + public boolean isConfluentCloud() { return managedClusterProperties.getProvider().equals(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); } @@ -349,11 +292,21 @@ public Map collectBrokerTopicsFromNames(List topicNames) return topics; } + /** + * Alter topics. 
+ * + * @param toUpdate The topics to update + * @param topics The current topics + */ private void alterTopics(Map> toUpdate, List topics) { AlterConfigsResult alterConfigsResult = getAdminClient().incrementalAlterConfigs(toUpdate); alterConfigsResult.values().forEach((key, value) -> { - Topic updatedTopic = - topics.stream().filter(t -> t.getMetadata().getName().equals(key.name())).findFirst().get(); + Topic updatedTopic = topics + .stream() + .filter(t -> t.getMetadata().getName().equals(key.name())) + .findFirst() + .get(); + try { value.get(10, TimeUnit.SECONDS); updatedTopic.getMetadata().setCreationTimestamp(Date.from(Instant.now())); @@ -363,7 +316,7 @@ private void alterTopics(Map> toUpdate log.info("Success updating topic configs {} on {}: [{}]", key.name(), managedClusterProperties.getName(), - toUpdate.get(key).stream().map(AlterConfigOp::toString).collect(Collectors.joining(","))); + toUpdate.get(key).stream().map(AlterConfigOp::toString).collect(Collectors.joining(", "))); } catch (InterruptedException e) { log.error("Error", e); Thread.currentThread().interrupt(); @@ -371,12 +324,17 @@ private void alterTopics(Map> toUpdate updatedTopic.setStatus( Topic.TopicStatus.ofFailed("Error while updating topic configs: " + e.getMessage())); log.error(String.format("Error while updating topic configs %s on %s", key.name(), - this.managedClusterProperties.getName()), e); + managedClusterProperties.getName()), e); } topicRepository.create(updatedTopic); }); } + /** + * Create topics. 
+ * + * @param topics The topics to create + */ private void createTopics(List topics) { List newTopics = topics.stream() .map(topic -> { @@ -384,34 +342,81 @@ private void createTopics(List topics) { NewTopic newTopic = new NewTopic(topic.getMetadata().getName(), topic.getSpec().getPartitions(), (short) topic.getSpec().getReplicationFactor()); newTopic.configs(topic.getSpec().getConfigs()); - log.debug("{}", newTopic); return newTopic; }) .toList(); CreateTopicsResult createTopicsResult = getAdminClient().createTopics(newTopics); createTopicsResult.values().forEach((key, value) -> { - Topic createdTopic = topics.stream().filter(t -> t.getMetadata().getName().equals(key)).findFirst().get(); + Topic createdTopic = topics + .stream() + .filter(t -> t.getMetadata().getName().equals(key)) + .findFirst() + .get(); + try { value.get(10, TimeUnit.SECONDS); createdTopic.getMetadata().setCreationTimestamp(Date.from(Instant.now())); createdTopic.getMetadata().setGeneration(1); createdTopic.setStatus(Topic.TopicStatus.ofSuccess("Topic created")); - log.info("Success creating topic {} on {}", key, this.managedClusterProperties.getName()); + log.info("Success creating topic {} on {}", key, managedClusterProperties.getName()); } catch (InterruptedException e) { log.error("Error", e); Thread.currentThread().interrupt(); } catch (Exception e) { createdTopic.setStatus(Topic.TopicStatus.ofFailed("Error while creating topic: " + e.getMessage())); log.error( - String.format("Error while creating topic %s on %s", key, this.managedClusterProperties.getName()), + String.format("Error while creating topic %s on %s", key, managedClusterProperties.getName()), e); } topicRepository.create(createdTopic); }); } + /** + * Create tags. 
+ * + * @param tagsToCreate The tags to create + */ + private void createTags(List tagsToCreate) { + String stringTags = String.join(", ", tagsToCreate + .stream() + .map(Record::toString) + .toList()); + + schemaRegistryClient.addTags(managedClusterProperties.getName(), tagsToCreate) + .subscribe(success -> log.info(String.format("Success creating tag %s.", stringTags)), + error -> log.error(String.format("Error creating tag %s.", stringTags), error)); + } + /** + * Delete tags. + * + * @param tagsToDelete The tags to delete + * @param topicName The topic name + */ + private void deleteTags(Collection tagsToDelete, String topicName) { + tagsToDelete + .forEach(tag -> schemaRegistryClient.deleteTag(managedClusterProperties.getName(), + managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + + ":" + topicName, tag) + .subscribe(success -> log.info(String.format("Success deleting tag %s.", + managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + + topicName + + "/" + tag)), + error -> log.error(String.format("Error deleting tag %s.", + managedClusterProperties.getConfig().getProperty(CLUSTER_ID) + ":" + + topicName + + "/" + tag), error))); + } + + /** + * Compute the configuration changes. 
+ * + * @param expected The config from Ns4Kafka + * @param actual The config from cluster + * @return A list of config + */ private Collection computeConfigChanges(Map expected, Map actual) { List toCreate = expected.entrySet() .stream() diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java index 119c736e..21476cd8 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java @@ -6,7 +6,6 @@ import com.michelin.ns4kafka.repositories.ResourceQuotaRepository; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.annotation.EachBean; -import jakarta.inject.Inject; import jakarta.inject.Singleton; import java.security.SecureRandom; import java.util.Base64; @@ -17,6 +16,7 @@ import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.ScramCredentialInfo; @@ -33,6 +33,7 @@ @Slf4j @EachBean(ManagedClusterProperties.class) @Singleton +@AllArgsConstructor public class UserAsyncExecutor { public static final double BYTE_RATE_DEFAULT_VALUE = 102400.0; @@ -42,11 +43,9 @@ public class UserAsyncExecutor { private final AbstractUserSynchronizer userExecutor; - @Inject - NamespaceRepository namespaceRepository; + private NamespaceRepository namespaceRepository; - @Inject - ResourceQuotaRepository quotaRepository; + private ResourceQuotaRepository quotaRepository; /** * Constructor. 
@@ -273,4 +272,4 @@ public Map> listQuotas() { throw exception; } } -} \ No newline at end of file +} diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index 10a80192..3854ce35 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -30,7 +30,6 @@ import io.micronaut.http.HttpStatus; import io.micronaut.security.utils.SecurityService; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; @@ -235,7 +234,6 @@ void createNewTopic() throws InterruptedException, ExecutionException, TimeoutEx .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -279,7 +277,6 @@ void shouldCreateNewTopicWithNoConstraint() throws InterruptedException, Executi .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -322,7 +319,6 @@ void updateTopic() throws InterruptedException, ExecutionException, TimeoutExcep .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -336,7 +332,6 @@ void updateTopic() throws InterruptedException, ExecutionException, TimeoutExcep .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -357,119 +352,99 @@ void updateTopic() throws InterruptedException, ExecutionException, TimeoutExcep 
assertEquals("test.topic", actual.getMetadata().getName()); } - /** - * Validate topic update with two new tags. - * - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test - void updateTopicWithNewTags() throws InterruptedException, ExecutionException, TimeoutException { + void shouldValidateNewTags() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .tags(Collections.emptyList()) - .configs(Map.of("cleanup.policy", "compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .tags(Arrays.asList("TAG1", "TAG3")) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .tags(Arrays.asList("TAG1", "TAG2")) - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .tags(Arrays.asList("TAG1", "TAG2")) + .replicationFactor(3) + .partitions(3) + 
.configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.of(existing)); - when(topicService.create(topic)).thenReturn(topic); - when(securityService.username()).thenReturn(Optional.of("test-user")); - when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); - doNothing().when(applicationEventPublisher).publishEvent(any()); + when(topicService.validateTags(ns, topic)).thenReturn(List.of("Error on tags")); - var response = topicController.apply("test", topic, false); - Topic actual = response.body(); - assertEquals("changed", response.header("X-Ns4kafka-Result")); - assertEquals("test.topic", actual.getMetadata().getName()); - assertEquals(2, actual.getSpec().getTags().size()); - assertEquals("TAG1", actual.getSpec().getTags().get(0)); - assertEquals("TAG2", actual.getSpec().getTags().get(1)); + ResourceValidationException actual = + assertThrows(ResourceValidationException.class, () -> topicController.apply("test", topic, false)); + assertEquals(1, actual.getValidationErrors().size()); + assertLinesMatch(List.of("Error on tags"), actual.getValidationErrors()); } - - /** - * Validate topic update with a tag to delete. 
- * - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ + @Test - void updateTopicWithTagToDelete() throws InterruptedException, ExecutionException, TimeoutException { + void shouldNotValidateTagsWhenNoNewTag() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .tags(Arrays.asList("TAG1", "TAG2")) - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy", "compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .tags(Arrays.asList("TAG1", "TAG2")) + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .tags(List.of("TAG1")) - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG1")) + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + 
"min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.of(existing)); when(topicService.create(topic)).thenReturn(topic); when(securityService.username()).thenReturn(Optional.of("test-user")); @@ -506,7 +481,6 @@ void updateTopicValidationErrors() { .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -520,7 +494,6 @@ void updateTopicValidationErrors() { .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(6) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -561,7 +534,6 @@ void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionE .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -575,7 +547,6 @@ void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionE .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -612,7 +583,6 @@ void createNewTopicDryRun() throws InterruptedException, ExecutionException, Tim .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -647,7 +617,6 @@ void createNewTopicFailValidation() { .spec(Topic.TopicSpec.builder() .replicationFactor(1) .partitions(3) - .tags(Collections.emptyList()) 
.configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -683,7 +652,6 @@ void shouldNotFailWhenCreatingNewTopicWithNoValidator() .spec(Topic.TopicSpec.builder() .replicationFactor(1) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -719,7 +687,6 @@ void shouldNotFailWhenCreatingNewTopicWithNoValidationConstraint() .spec(Topic.TopicSpec.builder() .replicationFactor(1) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -754,7 +721,6 @@ void createNewTopicFailQuotaValidation() { .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) @@ -1108,7 +1074,6 @@ void createCollidingTopic() throws InterruptedException, ExecutionException, Tim .spec(Topic.TopicSpec.builder() .replicationFactor(3) .partitions(3) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "2", "retention.ms", "60000")) diff --git a/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java b/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java index f0da94da..681b4ba2 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java @@ -42,7 +42,6 @@ import jakarta.inject.Inject; import java.net.MalformedURLException; import java.net.URL; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -207,7 +206,6 @@ void deployConnectors() throws InterruptedException, MalformedURLException { .spec(Topic.TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", 
"min.insync.replicas", "1", "retention.ms", "60000")) @@ -325,7 +323,6 @@ void updateConnectorsWithNullProperty() throws InterruptedException, MalformedUR .spec(Topic.TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -375,7 +372,6 @@ void restartConnector() throws InterruptedException { .spec(Topic.TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -430,7 +426,6 @@ void pauseAndResumeConnector() throws MalformedURLException, InterruptedExceptio .spec(Topic.TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) diff --git a/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java b/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java index e53dc426..9f09a446 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java @@ -40,7 +40,6 @@ import io.micronaut.test.extensions.junit5.annotation.MicronautTest; import jakarta.inject.Inject; import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -219,7 +218,6 @@ void createTopic() throws InterruptedException, ExecutionException { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -262,7 +260,6 @@ void updateTopic() throws InterruptedException, ExecutionException { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", 
"min.insync.replicas", "1", "retention.ms", "60000")) @@ -287,7 +284,6 @@ void updateTopic() throws InterruptedException, ExecutionException { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "70000"))//This line was changed @@ -333,7 +329,6 @@ void invalidTopicName() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -375,7 +370,6 @@ void updateTopicNoChange() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -393,7 +387,6 @@ void updateTopicNoChange() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "90000")) @@ -422,7 +415,6 @@ void testDeleteRecords() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "delete", "min.insync.replicas", "1", "retention.ms", "60000")) @@ -485,7 +477,6 @@ void testDeleteRecordsCompactTopic() { .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) - .tags(Collections.emptyList()) .configs(Map.of("cleanup.policy", "compact", "min.insync.replicas", "1", "retention.ms", "60000")) diff --git a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index 627e1d44..89743a0c 100644 --- a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -887,22 +887,22 @@ void findAll() { @Test void shouldTagsBeValid() { Namespace ns = 
Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").build()) - .spec(Topic.TopicSpec.builder() - .tags(List.of("TAG_TEST")).build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG_TEST")).build()) + .build(); List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); when(managedClusterProperties.stream()).thenReturn(Stream.of( - new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); + new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(tagInfo)); List validationErrors = topicService.validateTags(ns, topic); @@ -912,77 +912,77 @@ void shouldTagsBeValid() { @Test void shouldTagsBeInvalidWhenNotConfluentCloud() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").build()) - .spec(Topic.TopicSpec.builder() - .tags(List.of("TAG_TEST")).build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG_TEST")).build()) + .build(); when(managedClusterProperties.stream()).thenReturn(Stream.of( - new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.SELF_MANAGED))); + new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.SELF_MANAGED))); List validationErrors = topicService.validateTags(ns, topic); assertEquals(1, 
validationErrors.size()); - assertEquals("Invalid value (TAG_TEST) for tags: Tags are not currently supported.", validationErrors.get(0)); + assertEquals("Invalid value TAG_TEST for tags: Tags are not currently supported.", validationErrors.get(0)); } @Test void shouldTagsBeInvalidWhenNoTagsAllowed() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").build()) - .spec(Topic.TopicSpec.builder() - .tags(List.of("TAG_TEST")).build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("TAG_TEST")).build()) + .build(); when(managedClusterProperties.stream()).thenReturn(Stream.of( - new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); + new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(Collections.emptyList())); List validationErrors = topicService.validateTags(ns, topic); assertEquals(1, validationErrors.size()); assertEquals( - "Invalid value (TAG_TEST) for tags: No tags allowed.", - validationErrors.get(0)); + "Invalid value TAG_TEST for tags: No tags allowed.", + validationErrors.get(0)); } @Test void shouldTagsBeInvalidWhenNotAllowed() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").build()) - .spec(Topic.TopicSpec.builder() - .tags(List.of("BAD_TAG")).build()) - .build(); + 
.metadata(ObjectMeta.builder().name("ns-topic1").build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of("BAD_TAG")).build()) + .build(); List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); when(managedClusterProperties.stream()) - .thenReturn(Stream.of( - new ManagedClusterProperties("local", - ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); + .thenReturn(Stream.of( + new ManagedClusterProperties("local", + ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); when(schemaRegistryClient.getTags("local")).thenReturn(Mono.just(tagInfo)); List validationErrors = topicService.validateTags(ns, topic); assertEquals(1, validationErrors.size()); - assertEquals("Invalid value (BAD_TAG) for tags: Available tags are (TAG_TEST).", validationErrors.get(0)); + assertEquals("Invalid value BAD_TAG for tags: Available tags are TAG_TEST.", validationErrors.get(0)); } } diff --git a/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java b/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java deleted file mode 100644 index 5c6e937f..00000000 --- a/src/test/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClientTest.java +++ /dev/null @@ -1,130 +0,0 @@ -package com.michelin.ns4kafka.services.clients.schema; - -import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.TOPIC_ENTITY_TYPE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.when; - -import com.michelin.ns4kafka.properties.ManagedClusterProperties; -import com.michelin.ns4kafka.services.clients.schema.entities.TagInfo; -import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; -import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; -import io.micronaut.core.type.Argument; -import io.micronaut.http.HttpRequest; -import 
io.micronaut.http.HttpResponse; -import io.micronaut.http.client.HttpClient; -import java.util.List; -import java.util.stream.Stream; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import reactor.core.publisher.Mono; - -/** - * Test schema registry. - */ -@ExtendWith(MockitoExtension.class) -class SchemaRegistryClientTest { - - private static final String KAFKA_CLUSTER = "local"; - private static final String TAG = "TAG"; - private static final String ENTITY_NAME = "ENTITY_NAME"; - - @Mock - HttpClient httpClient; - - @Mock - List managedClusterPropertiesList; - - @InjectMocks - SchemaRegistryClient schemaRegistryClient; - - static ManagedClusterProperties.SchemaRegistryProperties schemaRegistryProperties; - - static ManagedClusterProperties managedClusterProperties; - - @BeforeAll - static void init() { - schemaRegistryProperties = new ManagedClusterProperties.SchemaRegistryProperties(); - schemaRegistryProperties.setUrl("URL"); - schemaRegistryProperties.setBasicAuthUsername("USER"); - schemaRegistryProperties.setBasicAuthPassword("PASSWORD"); - - managedClusterProperties = - new ManagedClusterProperties(KAFKA_CLUSTER, ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); - managedClusterProperties.setSchemaRegistry(schemaRegistryProperties); - } - - @Test - void getTagsTest() { - TagInfo tagInfo = TagInfo.builder().name(TAG).build(); - - when(managedClusterPropertiesList.stream()).thenReturn( - Stream.of(managedClusterProperties)); - when(httpClient.retrieve((HttpRequest) any(), (Argument) any())) - .thenReturn(Mono.just(List.of(tagInfo))); - - Mono> tagInfos = schemaRegistryClient.getTags(KAFKA_CLUSTER); - - assertEquals(TAG, tagInfos.block().get(0).name()); - } - - @Test - void getTopicWithTagsTest() { - TagTopicInfo tagTopicInfo = TagTopicInfo.builder() - 
.entityType(TOPIC_ENTITY_TYPE) - .typeName(TAG) - .entityName(ENTITY_NAME).build(); - - when(managedClusterPropertiesList.stream()).thenReturn( - Stream.of(managedClusterProperties)); - when(httpClient.retrieve((HttpRequest) any(), (Argument) any())) - .thenReturn(Mono.just(List.of(tagTopicInfo))); - - Mono> tagInfos = schemaRegistryClient.getTopicWithTags(KAFKA_CLUSTER, ENTITY_NAME); - - assertEquals(ENTITY_NAME, tagInfos.block().get(0).entityName()); - assertEquals(TAG, tagInfos.block().get(0).typeName()); - assertEquals(TOPIC_ENTITY_TYPE, tagInfos.block().get(0).entityType()); - } - - @Test - void addTagsTest() { - TagTopicInfo tagTopicInfo = TagTopicInfo.builder() - .entityType(TOPIC_ENTITY_TYPE) - .entityName(ENTITY_NAME) - .typeName(TAG).build(); - - TagSpecs tagSpecs = TagSpecs.builder() - .entityType(TOPIC_ENTITY_TYPE) - .entityName(ENTITY_NAME) - .typeName(TAG).build(); - - when(managedClusterPropertiesList.stream()).thenReturn( - Stream.of(managedClusterProperties)); - when(httpClient.retrieve((HttpRequest) any(), (Argument) any())) - .thenReturn(Mono.just(List.of(tagTopicInfo))); - - Mono> tagInfos = schemaRegistryClient.addTags(KAFKA_CLUSTER, List.of(tagSpecs)); - - assertEquals(ENTITY_NAME, tagInfos.block().get(0).entityName()); - assertEquals(TAG, tagInfos.block().get(0).typeName()); - assertEquals(TOPIC_ENTITY_TYPE, tagInfos.block().get(0).entityType()); - } - - @Test - void deleteTagTest() { - when(managedClusterPropertiesList.stream()).thenReturn( - Stream.of(managedClusterProperties)); - when(httpClient.exchange((HttpRequest) any(), (Class) any())) - .thenReturn(Mono.just(HttpResponse.accepted())); - - Mono> deleteInfo = schemaRegistryClient.deleteTag(KAFKA_CLUSTER, ENTITY_NAME, TAG); - - assertNotNull(deleteInfo); - } -} diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java b/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java deleted file mode 100644 index bfe7841f..00000000 --- 
a/src/test/java/com/michelin/ns4kafka/services/executors/HttpResponseMock.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.michelin.ns4kafka.services.executors; - -import io.micronaut.core.convert.value.MutableConvertibleValues; -import io.micronaut.http.HttpHeaders; -import io.micronaut.http.HttpResponse; -import io.micronaut.http.HttpStatus; -import java.util.Optional; - -/** - * Class to Mock Http Response. - */ -public class HttpResponseMock implements HttpResponse { - @Override - public HttpStatus getStatus() { - return null; - } - - @Override - public HttpHeaders getHeaders() { - return null; - } - - @Override - public MutableConvertibleValues getAttributes() { - return null; - } - - @Override - public Optional getBody() { - return Optional.empty(); - } - - @Override - public String reason() { - return null; - } - - @Override - public int code() { - return 0; - } -} diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java b/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java deleted file mode 100644 index e734b3f3..00000000 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TagSpecsArgumentMatcher.java +++ /dev/null @@ -1,27 +0,0 @@ -package com.michelin.ns4kafka.services.executors; - -import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; -import java.util.List; -import org.mockito.ArgumentMatcher; - -/** - * Matcher for TagSpecs. 
- */ -public class TagSpecsArgumentMatcher implements ArgumentMatcher> { - - private List left; - - public TagSpecsArgumentMatcher(List tagSpecsList) { - this.left = tagSpecsList; - } - - @Override - public boolean matches(List right) { - if (left.size() != right.size()) { - return false; - } - return left.get(0).entityName().equals(right.get(0).entityName()) - && left.get(0).entityType().equals(right.get(0).entityType()) - && left.get(0).typeName().equals(right.get(0).typeName()); - } -} diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index ec8baa93..4951a093 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -2,13 +2,11 @@ import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.CLUSTER_ID; import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.TOPIC_ENTITY_TYPE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.argThat; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.times; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -16,11 +14,6 @@ import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; -import com.michelin.ns4kafka.services.clients.schema.entities.TagSpecs; -import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; 
-import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; @@ -33,12 +26,12 @@ @ExtendWith(MockitoExtension.class) class TopicAsyncExecutorTest { - private static final String CLUSTER_ID_TEST = "cluster_id_test"; private static final String LOCAL_CLUSTER = "local"; private static final String TOPIC_NAME = "topic"; private static final String TAG1 = "TAG1"; private static final String TAG2 = "TAG2"; + private static final String TAG3 = "TAG3"; @Mock SchemaRegistryClient schemaRegistryClient; @@ -50,173 +43,84 @@ class TopicAsyncExecutorTest { TopicAsyncExecutor topicAsyncExecutor; @Test - void createTagsShouldAddTags() { + void shouldDeleteTagsAndNotCreateIfEmpty() { Properties properties = new Properties(); properties.put(CLUSTER_ID, CLUSTER_ID_TEST); - managedClusterProperties.setConfig(properties); - when(schemaRegistryClient.addTags(anyString(), anyList())).thenReturn(Mono.just(new ArrayList<>())); - when(managedClusterProperties.getConfig()).thenReturn(properties); + when(schemaRegistryClient.deleteTag(anyString(), + anyString(), anyString())) + .thenReturn(Mono.empty()) + .thenReturn(Mono.error(new Exception("error"))); when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); - when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); - - List ns4kafkaTopics = new ArrayList<>(); - Topic ns4kafkaTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).generation(1).build()) - .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG1)).build()).build(); - ns4kafkaTopics.add(ns4kafkaTopic); - - Map brokerTopics = new HashMap<>(); - Topic brokerTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) - .spec(Topic.TopicSpec.builder() - .tags(Collections.emptyList()).build()).build(); - brokerTopics.put(TOPIC_NAME, brokerTopic); - - 
topicAsyncExecutor.manageTags(ns4kafkaTopics, brokerTopics); - - List tagSpecsList = new ArrayList<>(); - TagSpecs tagSpecs = TagSpecs.builder().typeName(TAG1) - .entityName(CLUSTER_ID_TEST + ":" + TOPIC_NAME) - .entityType(TOPIC_ENTITY_TYPE).build(); - tagSpecsList.add(tagSpecs); - verify(schemaRegistryClient, times(1)) - .addTags(eq(LOCAL_CLUSTER), argThat(new TagSpecsArgumentMatcher(tagSpecsList))); - } - - @Test - void createTagsShouldNotAddTags() { - Properties properties = new Properties(); - properties.put(CLUSTER_ID, CLUSTER_ID_TEST); - managedClusterProperties.setConfig(properties); + when(managedClusterProperties.getConfig()).thenReturn(properties); - List ns4kafkaTopics = new ArrayList<>(); - Topic ns4kafkaTopic = Topic.builder() + List ns4kafkaTopics = List.of( + Topic.builder() .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) + .name(TOPIC_NAME) + .build()) .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG1)).build()).build(); - ns4kafkaTopics.add(ns4kafkaTopic); + .tags(List.of(TAG1)) + .build()) + .build()); - Map brokerTopics = new HashMap<>(); - Topic brokerTopic = Topic.builder() + Map brokerTopics = Map.of(TOPIC_NAME, + Topic.builder() .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) + .name(TOPIC_NAME) + .build()) .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG1)).build()).build(); - brokerTopics.put(TOPIC_NAME, brokerTopic); + .tags(List.of(TAG1, TAG2, TAG3)) + .build()) + .build()); - topicAsyncExecutor.createTags(ns4kafkaTopics, brokerTopics); + topicAsyncExecutor.alterTags(ns4kafkaTopics, brokerTopics); - verify(schemaRegistryClient, times(0)).addTags(anyString(), anyList()); + verify(schemaRegistryClient).deleteTag(LOCAL_CLUSTER, CLUSTER_ID_TEST + ":" + TOPIC_NAME, TAG2); + verify(schemaRegistryClient).deleteTag(LOCAL_CLUSTER, CLUSTER_ID_TEST + ":" + TOPIC_NAME, TAG3); } @Test - void deleteTagsShouldDeleteTags() { + void shouldCreateTags() { Properties properties = new Properties(); properties.put(CLUSTER_ID, 
CLUSTER_ID_TEST); - managedClusterProperties.setConfig(properties); - when(schemaRegistryClient.deleteTag(anyString(), anyString(), anyString())) - .thenReturn(Mono.just(new HttpResponseMock())); - when(managedClusterProperties.getConfig()).thenReturn(properties); + when(schemaRegistryClient.addTags(anyString(), anyList())) + .thenReturn(Mono.empty()) + .thenReturn(Mono.error(new Exception("error"))); when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); + when(managedClusterProperties.getConfig()).thenReturn(properties); - List ns4kafkaTopics = new ArrayList<>(); - Topic ns4kafkaTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) - .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG2)).build()).build(); - ns4kafkaTopics.add(ns4kafkaTopic); - - Map brokerTopics = new HashMap<>(); - Topic brokerTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) - .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG1, TAG2)).build()).build(); - brokerTopics.put(TOPIC_NAME, brokerTopic); - - topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); - - verify(schemaRegistryClient, times(1)) - .deleteTag(LOCAL_CLUSTER, CLUSTER_ID_TEST + ":" + TOPIC_NAME, TAG1); - } - - @Test - void deleteTagsShouldNotDeleteTags() { - Properties properties = new Properties(); - properties.put(CLUSTER_ID, CLUSTER_ID_TEST); - managedClusterProperties.setConfig(properties); - - List ns4kafkaTopics = new ArrayList<>(); - Topic ns4kafkaTopic = Topic.builder() + List ns4kafkaTopics = List.of( + Topic.builder() .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) + .name(TOPIC_NAME) + .build()) .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG1)).build()).build(); - ns4kafkaTopics.add(ns4kafkaTopic); + .tags(List.of(TAG1)) + .build()) + .build()); - Map brokerTopics = new HashMap<>(); - Topic brokerTopic = Topic.builder() + Map brokerTopics = Map.of(TOPIC_NAME, + Topic.builder() .metadata(ObjectMeta.builder() - 
.name(TOPIC_NAME).build()) + .name(TOPIC_NAME) + .build()) .spec(Topic.TopicSpec.builder() - .tags(List.of(TAG1)).build()).build(); - brokerTopics.put(TOPIC_NAME, brokerTopic); + .build()) + .build()); - topicAsyncExecutor.deleteTags(ns4kafkaTopics, brokerTopics); + topicAsyncExecutor.alterTags(ns4kafkaTopics, brokerTopics); - verify(schemaRegistryClient, times(0)).deleteTag(anyString(), anyString(), anyString()); + verify(schemaRegistryClient).addTags(eq(LOCAL_CLUSTER), argThat(tags -> + tags.get(0).entityName().equals(CLUSTER_ID_TEST + ":" + TOPIC_NAME) + && tags.get(0).typeName().equals(TAG1) + && tags.get(0).entityType().equals(TOPIC_ENTITY_TYPE))); } @Test - void completeWithTagsShouldComplete() { - Properties properties = new Properties(); - properties.put(CLUSTER_ID, CLUSTER_ID_TEST); - managedClusterProperties.setConfig(properties); - - TagTopicInfo tagTopicInfo = TagTopicInfo.builder().typeName(TAG1).build(); - - when(schemaRegistryClient.getTopicWithTags(anyString(), anyString())) - .thenReturn(Mono.just(List.of(tagTopicInfo))); - when(managedClusterProperties.getConfig()).thenReturn(properties); - when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); + void shouldBeConfluentCloud() { when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); - - Map brokerTopics = new HashMap<>(); - Topic brokerTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) - .spec(Topic.TopicSpec.builder().build()).build(); - brokerTopics.put(TOPIC_NAME, brokerTopic); - - topicAsyncExecutor.enrichWithTags(brokerTopics); - - assertEquals(TAG1, brokerTopics.get(TOPIC_NAME).getSpec().getTags().get(0)); - } - - @Test - void completeWithTagsShouldNotComplete() { - Properties properties = new Properties(); - properties.put(CLUSTER_ID, CLUSTER_ID_TEST); - managedClusterProperties.setConfig(properties); - - 
when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.SELF_MANAGED); - - Map brokerTopics = new HashMap<>(); - Topic brokerTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name(TOPIC_NAME).build()) - .spec(Topic.TopicSpec.builder().build()).build(); - brokerTopics.put(TOPIC_NAME, brokerTopic); - - topicAsyncExecutor.enrichWithTags(brokerTopics); - - assertNull(brokerTopics.get(TOPIC_NAME).getSpec().getTags()); + assertTrue(topicAsyncExecutor.isConfluentCloud()); } } From 493a950b5c50652f7df560a2ae5e8cec53ae170f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Greffier?= Date: Sun, 8 Oct 2023 01:51:11 +0200 Subject: [PATCH 23/27] Fix tests --- .../ns4kafka/services/executors/UserAsyncExecutor.java | 9 +++++---- .../java/com/michelin/ns4kafka/integration/UserTest.java | 2 -- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java index 21476cd8..d2dde4f4 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java @@ -6,6 +6,7 @@ import com.michelin.ns4kafka.repositories.ResourceQuotaRepository; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.annotation.EachBean; +import jakarta.inject.Inject; import jakarta.inject.Singleton; import java.security.SecureRandom; import java.util.Base64; @@ -16,7 +17,6 @@ import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.ScramCredentialInfo; @@ -33,7 +33,6 @@ @Slf4j @EachBean(ManagedClusterProperties.class) @Singleton -@AllArgsConstructor public class 
UserAsyncExecutor { public static final double BYTE_RATE_DEFAULT_VALUE = 102400.0; @@ -43,9 +42,11 @@ public class UserAsyncExecutor { private final AbstractUserSynchronizer userExecutor; - private NamespaceRepository namespaceRepository; + @Inject + NamespaceRepository namespaceRepository; - private ResourceQuotaRepository quotaRepository; + @Inject + ResourceQuotaRepository quotaRepository; /** * Constructor. diff --git a/src/test/java/com/michelin/ns4kafka/integration/UserTest.java b/src/test/java/com/michelin/ns4kafka/integration/UserTest.java index 6ced99fa..7022e4e7 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/UserTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/UserTest.java @@ -115,9 +115,7 @@ void init() { client.toBlocking() .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns3)); - //force User Sync userAsyncExecutors.forEach(UserAsyncExecutor::run); - } @Test From 96aab508ec09baeff0a2f53e0989335a25f56152 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Greffier?= Date: Sun, 8 Oct 2023 20:10:51 +0200 Subject: [PATCH 24/27] Improve tests --- .../executors/TopicAsyncExecutorTest.java | 136 ++++++++++++++++++ 1 file changed, 136 insertions(+) diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index 4951a093..2b43f9d3 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -2,11 +2,14 @@ import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.CLUSTER_ID; import static com.michelin.ns4kafka.services.executors.TopicAsyncExecutor.TOPIC_ENTITY_TYPE; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; 
import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -14,9 +17,15 @@ import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; +import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.DeleteTopicsResult; +import org.apache.kafka.common.KafkaFuture; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; @@ -39,6 +48,15 @@ class TopicAsyncExecutorTest { @Mock ManagedClusterProperties managedClusterProperties; + @Mock + Admin adminClient; + + @Mock + DeleteTopicsResult deleteTopicsResult; + + @Mock + KafkaFuture kafkaFuture; + @InjectMocks TopicAsyncExecutor topicAsyncExecutor; @@ -123,4 +141,122 @@ void shouldBeConfluentCloud() { when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); assertTrue(topicAsyncExecutor.isConfluentCloud()); } + + @Test + void shouldDeleteTopicNoTags() throws ExecutionException, InterruptedException, TimeoutException { + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); + when(deleteTopicsResult.all()).thenReturn(kafkaFuture); + when(adminClient.deleteTopics(anyList())).thenReturn(deleteTopicsResult); + when(managedClusterProperties.getAdminClient()).thenReturn(adminClient); + + Topic topic = 
Topic.builder() + .metadata(ObjectMeta.builder() + .name(TOPIC_NAME) + .build()) + .spec(Topic.TopicSpec.builder() + .build()) + .build(); + + topicAsyncExecutor.deleteTopic(topic); + + verify(schemaRegistryClient, never()).deleteTag(any(), any(), any()); + } + + @Test + void shouldDeleteTopicSelfManagedCluster() throws ExecutionException, InterruptedException, TimeoutException { + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.SELF_MANAGED); + when(deleteTopicsResult.all()).thenReturn(kafkaFuture); + when(adminClient.deleteTopics(anyList())).thenReturn(deleteTopicsResult); + when(managedClusterProperties.getAdminClient()).thenReturn(adminClient); + + Topic topic = Topic.builder() + .metadata(ObjectMeta.builder() + .name(TOPIC_NAME) + .build()) + .spec(Topic.TopicSpec.builder() + .build()) + .build(); + + topicAsyncExecutor.deleteTopic(topic); + + verify(schemaRegistryClient, never()).deleteTag(any(), any(), any()); + } + + @Test + void shouldDeleteTopicAndTags() throws ExecutionException, InterruptedException, TimeoutException { + Properties properties = new Properties(); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); + + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); + when(deleteTopicsResult.all()).thenReturn(kafkaFuture); + when(adminClient.deleteTopics(anyList())).thenReturn(deleteTopicsResult); + when(managedClusterProperties.getAdminClient()).thenReturn(adminClient); + when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); + when(managedClusterProperties.getConfig()).thenReturn(properties); + when(schemaRegistryClient.deleteTag(anyString(), + anyString(), anyString())) + .thenReturn(Mono.empty()) + .thenReturn(Mono.error(new Exception("error"))); + + Topic topic = Topic.builder() + .metadata(ObjectMeta.builder() + .name(TOPIC_NAME) + .build()) + .spec(Topic.TopicSpec.builder() + .tags(List.of(TAG1)) + .build()) + .build(); + + 
topicAsyncExecutor.deleteTopic(topic); + + verify(schemaRegistryClient).deleteTag(LOCAL_CLUSTER, CLUSTER_ID_TEST + ":" + TOPIC_NAME, TAG1); + } + + @Test + void shouldNotEnrichWithTagsWhenNotConfluentCloud() { + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.SELF_MANAGED); + + Map brokerTopics = Map.of(TOPIC_NAME, + Topic.builder() + .metadata(ObjectMeta.builder() + .name(TOPIC_NAME) + .build()) + .spec(Topic.TopicSpec.builder() + .build()) + .build()); + + topicAsyncExecutor.enrichWithTags(brokerTopics); + + assertTrue(brokerTopics.get(TOPIC_NAME).getSpec().getTags().isEmpty()); + } + + @Test + void shouldEnrichWithTagsWhenConfluentCloud() { + Properties properties = new Properties(); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); + + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); + when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); + when(managedClusterProperties.getConfig()).thenReturn(properties); + + TagTopicInfo tagTopicInfo = TagTopicInfo.builder() + .typeName("typeName") + .build(); + + when(schemaRegistryClient.getTopicWithTags(LOCAL_CLUSTER, CLUSTER_ID_TEST + ":" + TOPIC_NAME)) + .thenReturn(Mono.just(List.of(tagTopicInfo))); + + Map brokerTopics = Map.of(TOPIC_NAME, + Topic.builder() + .metadata(ObjectMeta.builder() + .name(TOPIC_NAME) + .build()) + .spec(Topic.TopicSpec.builder() + .build()) + .build()); + + topicAsyncExecutor.enrichWithTags(brokerTopics); + + assertEquals("typeName", brokerTopics.get(TOPIC_NAME).getSpec().getTags().get(0)); + } } From 73bebbb83384a953933815892d612d9b0eb9280d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Greffier?= Date: Sun, 8 Oct 2023 20:34:55 +0200 Subject: [PATCH 25/27] Improve tests --- .../services/executors/UserAsyncExecutor.java | 4 +-- .../executors/TopicAsyncExecutorTest.java | 31 +++++++++++++++++-- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git 
a/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java index d2dde4f4..2b3f6296 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java @@ -137,7 +137,7 @@ private Map> collectNs4kafkaQuotas() { .stream() .filter(q -> q.getKey().startsWith(USER_QUOTA_PREFIX)) .forEach(q -> userQuota.put( - q.getKey().replaceAll(USER_QUOTA_PREFIX, ""), + q.getKey().replace(USER_QUOTA_PREFIX, ""), Double.parseDouble(q.getValue())))); return Map.entry(namespace.getSpec().getKafkaUser(), userQuota); @@ -165,7 +165,7 @@ static class Scram512UserSynchronizer implements AbstractUserSynchronizer { private final ScramCredentialInfo info = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_512, 4096); private final SecureRandom secureRandom = new SecureRandom(); - private Admin admin; + private final Admin admin; public Scram512UserSynchronizer(Admin admin) { this.admin = admin; diff --git a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java index 2b43f9d3..0f4e1686 100644 --- a/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutorTest.java @@ -18,6 +18,7 @@ import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient; import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo; +import io.micronaut.http.HttpResponse; import java.util.List; import java.util.Map; import java.util.Properties; @@ -195,7 +196,7 @@ void shouldDeleteTopicAndTags() throws ExecutionException, InterruptedException, when(managedClusterProperties.getConfig()).thenReturn(properties); 
when(schemaRegistryClient.deleteTag(anyString(), anyString(), anyString())) - .thenReturn(Mono.empty()) + .thenReturn(Mono.just(HttpResponse.ok())) .thenReturn(Mono.error(new Exception("error"))); Topic topic = Topic.builder() @@ -256,7 +257,33 @@ void shouldEnrichWithTagsWhenConfluentCloud() { .build()); topicAsyncExecutor.enrichWithTags(brokerTopics); - + assertEquals("typeName", brokerTopics.get(TOPIC_NAME).getSpec().getTags().get(0)); } + + @Test + void shouldEnrichWithTagsWhenConfluentCloudAndResponseIsNull() { + Properties properties = new Properties(); + properties.put(CLUSTER_ID, CLUSTER_ID_TEST); + + when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); + when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER); + when(managedClusterProperties.getConfig()).thenReturn(properties); + + when(schemaRegistryClient.getTopicWithTags(LOCAL_CLUSTER, CLUSTER_ID_TEST + ":" + TOPIC_NAME)) + .thenReturn(Mono.empty()); + + Map brokerTopics = Map.of(TOPIC_NAME, + Topic.builder() + .metadata(ObjectMeta.builder() + .name(TOPIC_NAME) + .build()) + .spec(Topic.TopicSpec.builder() + .build()) + .build()); + + topicAsyncExecutor.enrichWithTags(brokerTopics); + + assertTrue(brokerTopics.get(TOPIC_NAME).getSpec().getTags().isEmpty()); + } } From 0cdac36cbd8e298ccf82f0598710b64c7ffbbb24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Greffier?= Date: Mon, 9 Oct 2023 11:21:56 +0200 Subject: [PATCH 26/27] Only display forbidden tags --- .../com/michelin/ns4kafka/services/TopicService.java | 10 ++++++++-- .../michelin/ns4kafka/services/TopicServiceTest.java | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index ac262e9b..fb8479a5 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ 
b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -358,10 +358,16 @@ public List validateTags(Namespace namespace, Topic topic) { return validationErrors; } - if (!tagNames.containsAll(topic.getSpec().getTags())) { + List unavailableTagNames = topic.getSpec().getTags() + .stream() + .filter(tagName -> !tagNames.contains(tagName)) + .toList(); + + if (!unavailableTagNames.isEmpty()) { validationErrors.add(String.format( "Invalid value %s for tags: Available tags are %s.", - String.join(", ", topic.getSpec().getTags()), String.join(",", tagNames))); + String.join(", ", unavailableTagNames), + String.join(", ", tagNames))); } return validationErrors; diff --git a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index 89743a0c..fa277b11 100644 --- a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -970,7 +970,7 @@ void shouldTagsBeInvalidWhenNotAllowed() { Topic topic = Topic.builder() .metadata(ObjectMeta.builder().name("ns-topic1").build()) .spec(Topic.TopicSpec.builder() - .tags(List.of("BAD_TAG")).build()) + .tags(List.of("BAD_TAG", "TAG_TEST")).build()) .build(); List tagInfo = List.of(TagInfo.builder().name("TAG_TEST").build()); From 34eb0d2e715fbc977661bff8832c38299ff9aeb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Greffier?= Date: Mon, 9 Oct 2023 12:09:16 +0200 Subject: [PATCH 27/27] Add links to Confluent Cloud tags --- README.md | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index b0b413cb..42e2dc32 100644 --- a/README.md +++ b/README.md @@ -203,22 +203,22 @@ ns4kafka: The name for each managed cluster has to be unique. This is this name you have to set in the field **metadata.cluster** of your namespace descriptors. 
-| Property | type | description | -|-----------------------------------------|---------|-------------------------------------------------------------| -| manage-users | boolean | Does the cluster manages users ? | -| manage-acls | boolean | Does the cluster manages access control entries ? | -| manage-topics | boolean | Does the cluster manages topics ? | -| manage-connectors | boolean | Does the cluster manages connects ? | -| drop-unsync-acls | boolean | Should Ns4Kafka drop unsynchronized ACLs | -| provider | boolean | The kind of cluster. Either SELF_MANAGED or CONFLUENT_CLOUD | -| config.bootstrap.servers | string | The location of the clusters servers | -| config.cluster.id | string | The cluster id. Required to use Confluent Cloud tags. | -| schema-registry.url | string | The location of the Schema Registry | -| schema-registry.basicAuthUsername | string | Basic authentication username to the Schema Registry | -| schema-registry.basicAuthPassword | string | Basic authentication password to the Schema Registry | -| connects.connect-name.url | string | The location of the kafka connect | -| connects.connect-name.basicAuthUsername | string | Basic authentication username to the Kafka Connect | -| connects.connect-name.basicAuthPassword | string | Basic authentication password to the Kafka Connect | +| Property | type | description | +|-----------------------------------------|---------|----------------------------------------------------------------------------------------------------------------------------------------| +| manage-users | boolean | Does the cluster manages users ? | +| manage-acls | boolean | Does the cluster manages access control entries ? | +| manage-topics | boolean | Does the cluster manages topics ? | +| manage-connectors | boolean | Does the cluster manages connects ? | +| drop-unsync-acls | boolean | Should Ns4Kafka drop unsynchronized ACLs | +| provider | boolean | The kind of cluster. 
Either SELF_MANAGED or CONFLUENT_CLOUD | +| config.bootstrap.servers | string | The location of the clusters servers | +| config.cluster.id | string | The cluster id. Required to use [Confluent Cloud tags](https://docs.confluent.io/cloud/current/stream-governance/stream-catalog.html). | +| schema-registry.url | string | The location of the Schema Registry | +| schema-registry.basicAuthUsername | string | Basic authentication username to the Schema Registry | +| schema-registry.basicAuthPassword | string | Basic authentication password to the Schema Registry | +| connects.connect-name.url | string | The location of the kafka connect | +| connects.connect-name.basicAuthUsername | string | Basic authentication username to the Kafka Connect | +| connects.connect-name.basicAuthPassword | string | Basic authentication password to the Kafka Connect | The configuration will depend on the authentication method selected for your broker, schema registry and Kafka Connect.