Improve tests
Loïc Greffier committed Oct 8, 2023
1 parent 96aab50 commit 73bebbb
Showing 2 changed files with 31 additions and 4 deletions.
@@ -137,7 +137,7 @@ private Map<String, Map<String, Double>> collectNs4kafkaQuotas() {
.stream()
.filter(q -> q.getKey().startsWith(USER_QUOTA_PREFIX))
.forEach(q -> userQuota.put(
-     q.getKey().replaceAll(USER_QUOTA_PREFIX, ""),
+     q.getKey().replace(USER_QUOTA_PREFIX, ""),
Double.parseDouble(q.getValue()))));

return Map.entry(namespace.getSpec().getKafkaUser(), userQuota);
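
The switch from replaceAll() to replace() is more than cosmetic: String.replaceAll() interprets its first argument as a regular expression, while String.replace() performs a literal substitution of every occurrence. A standalone sketch of the difference (the prefix value below is hypothetical, not the real USER_QUOTA_PREFIX):

class ReplaceVsReplaceAll {
    public static void main(String[] args) {
        String prefix = "user.quota.";                  // '.' is a regex wildcard in replaceAll()
        String key = "user-quota-producer_byte_rate";   // does not literally contain the prefix

        System.out.println(key.replaceAll(prefix, "")); // "producer_byte_rate": regex '.' matched '-'
        System.out.println(key.replace(prefix, ""));    // unchanged: literal match only
    }
}

For a plain prefix with no regex metacharacters both calls behave the same, but replace() also avoids recompiling a regex on every key.
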
@@ -165,7 +165,7 @@ static class Scram512UserSynchronizer implements AbstractUserSynchronizer {

private final ScramCredentialInfo info = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_512, 4096);
private final SecureRandom secureRandom = new SecureRandom();
- private Admin admin;
+ private final Admin admin;

public Scram512UserSynchronizer(Admin admin) {
this.admin = admin;
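
Declaring the Admin field final documents and enforces that the dependency is assigned exactly once, in the constructor. A minimal sketch of the pattern (class name hypothetical):

import org.apache.kafka.clients.admin.Admin;

class FinalDependencySketch {
    private final Admin admin; // compiler requires assignment in the constructor

    FinalDependencySketch(Admin admin) {
        this.admin = admin;
        // any later reassignment of this.admin would be a compile-time error
    }
}
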
@@ -18,6 +18,7 @@
import com.michelin.ns4kafka.properties.ManagedClusterProperties;
import com.michelin.ns4kafka.services.clients.schema.SchemaRegistryClient;
import com.michelin.ns4kafka.services.clients.schema.entities.TagTopicInfo;
+ import io.micronaut.http.HttpResponse;
import java.util.List;
import java.util.Map;
import java.util.Properties;
@@ -195,7 +196,7 @@ void shouldDeleteTopicAndTags() throws ExecutionException, InterruptedException,
when(managedClusterProperties.getConfig()).thenReturn(properties);
when(schemaRegistryClient.deleteTag(anyString(),
anyString(), anyString()))
-     .thenReturn(Mono.empty())
+     .thenReturn(Mono.just(HttpResponse.ok()))
.thenReturn(Mono.error(new Exception("error")));

Topic topic = Topic.builder()
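
The updated stubbing relies on Mockito's consecutive-answer chaining: the first deleteTag() call now resolves to an HTTP 200 response instead of an empty Mono, while the second still fails, so both the success and the error paths are exercised. A minimal sketch, assuming a hypothetical TagClient interface rather than the real SchemaRegistryClient:

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import io.micronaut.http.HttpResponse;
import reactor.core.publisher.Mono;

class ConsecutiveStubbingSketch {
    interface TagClient {
        Mono<HttpResponse<Void>> deleteTag(String cluster, String entity, String tag);
    }

    public static void main(String[] args) {
        TagClient client = mock(TagClient.class);
        when(client.deleteTag(anyString(), anyString(), anyString()))
            .thenReturn(Mono.just(HttpResponse.ok()))        // first call: 200 OK
            .thenReturn(Mono.error(new Exception("error"))); // second call: error

        System.out.println(client.deleteTag("c", "e", "tag1").block().getStatus()); // OK
        client.deleteTag("c", "e", "tag2")
            .onErrorResume(e -> { System.out.println(e.getMessage()); return Mono.empty(); }) // error
            .block();
    }
}
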
@@ -256,7 +257,33 @@ void shouldEnrichWithTagsWhenConfluentCloud() {
.build());

topicAsyncExecutor.enrichWithTags(brokerTopics);

assertEquals("typeName", brokerTopics.get(TOPIC_NAME).getSpec().getTags().get(0));
}

+     @Test
+     void shouldEnrichWithTagsWhenConfluentCloudAndResponseIsNull() {
+         Properties properties = new Properties();
+         properties.put(CLUSTER_ID, CLUSTER_ID_TEST);
+
+         when(managedClusterProperties.getProvider()).thenReturn(ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD);
+         when(managedClusterProperties.getName()).thenReturn(LOCAL_CLUSTER);
+         when(managedClusterProperties.getConfig()).thenReturn(properties);
+
+         when(schemaRegistryClient.getTopicWithTags(LOCAL_CLUSTER, CLUSTER_ID_TEST + ":" + TOPIC_NAME))
+             .thenReturn(Mono.empty());
+
+         Map<String, Topic> brokerTopics = Map.of(TOPIC_NAME,
+             Topic.builder()
+                 .metadata(ObjectMeta.builder()
+                     .name(TOPIC_NAME)
+                     .build())
+                 .spec(Topic.TopicSpec.builder()
+                     .build())
+                 .build());
+
+         topicAsyncExecutor.enrichWithTags(brokerTopics);
+
+         assertTrue(brokerTopics.get(TOPIC_NAME).getSpec().getTags().isEmpty());
+     }
}
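
The new test pins down the empty-response branch: when getTopicWithTags() yields Mono.empty(), the topic's tag list must stay empty, whereas the earlier test expects the returned tag type name to be copied into the spec. A hypothetical, simplified sketch of that behaviour (stand-in record, not the actual ns4kafka implementation):

import java.util.List;
import reactor.core.publisher.Mono;

class EnrichWithTagsSketch {
    record TagTopicInfo(String typeName) {} // stand-in for the real entity

    static List<String> resolveTags(Mono<List<TagTopicInfo>> registryResponse) {
        // blockOptional() turns Mono.empty() into Optional.empty(), so no tags are applied
        return registryResponse.blockOptional()
            .map(tags -> tags.stream().map(TagTopicInfo::typeName).toList())
            .orElse(List.of());
    }

    public static void main(String[] args) {
        System.out.println(resolveTags(Mono.just(List.of(new TagTopicInfo("typeName"))))); // [typeName]
        System.out.println(resolveTags(Mono.empty()));                                     // []
    }
}
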
