Add the possibility to configure Confluent Stream catalog limit (#402)
* Set stream catalog API request limit to 500 & make the limit a parameter of the application

* Fix syntax

* Simplify properties ref

* Remove extra line

---------

Co-authored-by: thcai <[email protected]>
ThomasCAI-mlv authored Jun 27, 2024
1 parent 64bf07f commit d50525e
Showing 3 changed files with 58 additions and 13 deletions.
ConfluentCloudProperties.java (new file)
@@ -0,0 +1,25 @@
package com.michelin.ns4kafka.property;

import io.micronaut.context.annotation.ConfigurationProperties;
import lombok.Getter;
import lombok.Setter;

/**
 * Confluent Cloud properties.
 */
@Getter
@Setter
@ConfigurationProperties("ns4kafka.confluent-cloud")
public class ConfluentCloudProperties {
    private StreamCatalogProperties streamCatalog;

    /**
     * Stream Catalog properties.
     */
    @Getter
    @Setter
    @ConfigurationProperties("stream-catalog")
    public static class StreamCatalogProperties {
        int pageSize = 500;
    }
}
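
Because ConfluentCloudProperties binds the ns4kafka.confluent-cloud prefix and the nested class binds stream-catalog, Micronaut's relaxed binding resolves the pageSize field from a kebab-case key. As a minimal sketch, the page size could presumably be tuned from application.yml along these lines (500 is simply the built-in default):

ns4kafka:
  confluent-cloud:
    stream-catalog:
      page-size: 500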
TopicAsyncExecutor.java
@@ -2,6 +2,7 @@

 import com.michelin.ns4kafka.model.Metadata;
 import com.michelin.ns4kafka.model.Topic;
+import com.michelin.ns4kafka.property.ConfluentCloudProperties;
 import com.michelin.ns4kafka.property.ManagedClusterProperties;
 import com.michelin.ns4kafka.repository.TopicRepository;
 import com.michelin.ns4kafka.repository.kafka.KafkaStoreException;
@@ -62,6 +63,8 @@ public class TopicAsyncExecutor {

     private SchemaRegistryClient schemaRegistryClient;
 
+    private ConfluentCloudProperties confluentCloudProperties;
+
     private Admin getAdminClient() {
         return managedClusterProperties.getAdminClient();
     }
@@ -111,18 +114,17 @@ public void synchronizeTopics() {
             .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
 
         if (!createTopics.isEmpty()) {
-            log.debug("Topic(s) to create: "
-                + String.join(", ", createTopics.stream().map(topic -> topic.getMetadata().getName()).toList()));
+            log.debug("Topic(s) to create: {}", String.join(", ",
+                createTopics.stream().map(topic -> topic.getMetadata().getName()).toList()));
         }
 
         if (!updateTopics.isEmpty()) {
-            log.debug("Topic(s) to update: "
-                + String.join(", ", updateTopics.keySet().stream().map(ConfigResource::name).toList()));
+            log.debug("Topic(s) to update: {}", String.join(", ",
+                updateTopics.keySet().stream().map(ConfigResource::name).toList()));
             for (Map.Entry<ConfigResource, Collection<AlterConfigOp>> e : updateTopics.entrySet()) {
                 for (AlterConfigOp op : e.getValue()) {
-                    log.debug(
-                        e.getKey().name() + " " + op.opType().toString() + " " + op.configEntry().name() + "("
-                            + op.configEntry().value() + ")");
+                    log.debug("{} {} {}({})", e.getKey().name(), op.opType().toString(),
+                        op.configEntry().name(), op.configEntry().value());
                 }
             }
         }
@@ -271,7 +273,7 @@ public void enrichWithCatalogInfo(Map<String, Topic> topics) {

         // getting list of topics by managing offset & limit
         int offset = 0;
-        int limit = 5000;
+        int limit = confluentCloudProperties.getStreamCatalog().getPageSize();
         do {
             topicListResponse = schemaRegistryClient.getTopicWithCatalogInfo(
                 managedClusterProperties.getName(), limit, offset).block();
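
The rest of the do/while loop is collapsed in this diff. As a hedged sketch of the paging pattern only (not the verbatim method body, and the getEntities() accessor name is assumed from the test builder's entities(...) calls), the loop presumably advances the offset one page at a time until a page comes back empty, which matches how the tests below stub an empty second page at offset == limit:

        int offset = 0;
        int limit = confluentCloudProperties.getStreamCatalog().getPageSize();
        do {
            // fetch one catalog page and enrich matching topics with its entities
            topicListResponse = schemaRegistryClient.getTopicWithCatalogInfo(
                managedClusterProperties.getName(), limit, offset).block();
            offset += limit;
        } while (topicListResponse != null && !topicListResponse.getEntities().isEmpty());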
@@ -332,7 +334,7 @@ public Map<String, Topic> collectBrokerTopicsFromNames(List<String> topicNames)
                     .build())
                 .spec(Topic.TopicSpec.builder()
                     .replicationFactor(
-                        topicDescriptions.get(stringMapEntry.getKey()).partitions().get(0).replicas().size())
+                        topicDescriptions.get(stringMapEntry.getKey()).partitions().getFirst().replicas().size())
                     .partitions(topicDescriptions.get(stringMapEntry.getKey()).partitions().size())
                     .configs(stringMapEntry.getValue())
                     .build())
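
Separately from the configurable limit, this hunk swaps partitions().get(0) for partitions().getFirst(). List.getFirst() comes from the SequencedCollection API introduced in Java 21, so the change presumes a Java 21+ build target.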
TopicAsyncExecutorTest.java
@@ -14,6 +14,8 @@

 import com.michelin.ns4kafka.model.Metadata;
 import com.michelin.ns4kafka.model.Topic;
+import com.michelin.ns4kafka.property.ConfluentCloudProperties;
+import com.michelin.ns4kafka.property.ConfluentCloudProperties.StreamCatalogProperties;
 import com.michelin.ns4kafka.property.ManagedClusterProperties;
 import com.michelin.ns4kafka.repository.TopicRepository;
 import com.michelin.ns4kafka.service.client.schema.SchemaRegistryClient;
@@ -59,6 +61,12 @@ class TopicAsyncExecutorTest {
     @Mock
     ManagedClusterProperties managedClusterProperties;
 
+    @Mock
+    ConfluentCloudProperties confluentCloudProperties;
+
+    @Mock
+    StreamCatalogProperties streamCatalogProperties;
+
     @Mock
     TopicRepository topicRepository;
 
@@ -428,6 +436,10 @@ void shouldNotEnrichWithCatalogInfoWhenNotConfluentCloud() {
     void shouldEnrichWithCatalogInfoWhenConfluentCloud() {
         when(managedClusterProperties.isConfluentCloud()).thenReturn(true);
         when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER);
+        when(confluentCloudProperties.getStreamCatalog()).thenReturn(streamCatalogProperties);
+        when(streamCatalogProperties.getPageSize()).thenReturn(500);
+
+        int limit = 500;
 
         TopicEntity entity = TopicEntity.builder()
             .classificationNames(List.of(TAG1))
@@ -439,9 +451,9 @@ void shouldEnrichWithCatalogInfoWhenConfluentCloud() {
         TopicListResponse response1 = TopicListResponse.builder().entities(List.of(entity)).build();
         TopicListResponse response2 = TopicListResponse.builder().entities(List.of()).build();
 
-        when(schemaRegistryClient.getTopicWithCatalogInfo(LOCAL_CLUSTER, 5000, 0))
+        when(schemaRegistryClient.getTopicWithCatalogInfo(LOCAL_CLUSTER, limit, 0))
             .thenReturn(Mono.just(response1));
-        when(schemaRegistryClient.getTopicWithCatalogInfo(LOCAL_CLUSTER, 5000, 5000))
+        when(schemaRegistryClient.getTopicWithCatalogInfo(LOCAL_CLUSTER, limit, limit))
             .thenReturn(Mono.just(response2));
 
         Map<String, Topic> brokerTopics = Map.of(
@@ -463,6 +475,10 @@ void shouldEnrichWithCatalogInfoWhenConfluentCloud() {
     void shouldEnrichWithCatalogInfoForMultipleTopics() {
         when(managedClusterProperties.isConfluentCloud()).thenReturn(true);
         when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER);
+        when(confluentCloudProperties.getStreamCatalog()).thenReturn(streamCatalogProperties);
+        when(streamCatalogProperties.getPageSize()).thenReturn(500);
+
+        int limit = 500;
 
         TopicEntity entity1 = TopicEntity.builder()
             .classificationNames(List.of())
@@ -500,9 +516,9 @@ void shouldEnrichWithCatalogInfoForMultipleTopics() {
             .entities(List.of(entity1, entity2, entity3, entity4)).build();
         TopicListResponse response2 = TopicListResponse.builder().entities(List.of()).build();
 
-        when(schemaRegistryClient.getTopicWithCatalogInfo(LOCAL_CLUSTER, 5000, 0))
+        when(schemaRegistryClient.getTopicWithCatalogInfo(LOCAL_CLUSTER, limit, 0))
             .thenReturn(Mono.just(response1));
-        when(schemaRegistryClient.getTopicWithCatalogInfo(LOCAL_CLUSTER, 5000, 5000))
+        when(schemaRegistryClient.getTopicWithCatalogInfo(LOCAL_CLUSTER, limit, limit))
             .thenReturn(Mono.just(response2));
 
         Map<String, Topic> brokerTopics = Map.of(
@@ -553,6 +569,8 @@ void shouldEnrichWithCatalogInfoForMultipleTopics() {
     void shouldEnrichWithCatalogInfoWhenConfluentCloudAndResponseIsNull() {
         when(managedClusterProperties.isConfluentCloud()).thenReturn(true);
         when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER);
+        when(confluentCloudProperties.getStreamCatalog()).thenReturn(streamCatalogProperties);
+        when(streamCatalogProperties.getPageSize()).thenReturn(500);
         when(schemaRegistryClient.getTopicWithCatalogInfo(anyString(), any(Integer.class), any(Integer.class)))
             .thenReturn(Mono.empty());
 