diff --git a/.checkstyle/checkstyle.xml b/.checkstyle/checkstyle.xml
new file mode 100644
index 00000000..32c0f31e
--- /dev/null
+++ b/.checkstyle/checkstyle.xml
@@ -0,0 +1,382 @@
[The 382 added lines of Checkstyle XML configuration were lost in extraction and are not reproduced here.]
diff --git a/.github/workflows/on_pull_request.yml b/.github/workflows/on_pull_request.yml
index 95d2c00c..3a9f0927 100644
--- a/.github/workflows/on_pull_request.yml
+++ b/.github/workflows/on_pull_request.yml
@@ -8,39 +8,42 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-    - name: Checkout project
-      uses: actions/checkout@v4
-      with:
-        fetch-depth: 0
-
-    - name: Set up JDK 17
-      uses: actions/setup-java@v3
-      with:
-        java-version: '17'
-        distribution: 'temurin'
-
-    - name: Cache SonarCloud packages
-      uses: actions/cache@v3
-      with:
-        path: ~/.sonar/cache
-        key: ${{ runner.os }}-sonar
-        restore-keys: ${{ runner.os }}-sonar
-
-    - name: Cache Gradle packages
-      uses: actions/cache@v3
-      with:
-        path: ~/.gradle/caches
-        key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
-        restore-keys: ${{ runner.os }}-gradle
-
-    - name: Build and analyze
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-      run: ./gradlew build jacocoTestReport sonar --info
-
-    - name: Publish test report
-      if: always()
-      uses: mikepenz/action-junit-report@v4
-      with:
-        report_paths: '**/build/test-results/test/TEST-*.xml'
+      - name: Checkout project
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Set up JDK 17
+        uses: actions/setup-java@v3
+        with:
+          java-version: '17'
+          distribution: 'temurin'
+
+      - name: Cache SonarCloud packages
+        uses: actions/cache@v3
+        with:
+          path: ~/.sonar/cache
+          key: ${{ runner.os }}-sonar
+          restore-keys: ${{ runner.os }}-sonar
+
+      - name: Cache Gradle packages
+        uses: actions/cache@v3
+        with:
+          path: ~/.gradle/caches
+          key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
+          restore-keys: ${{ runner.os }}-gradle
+
+      - name: Check Style
+        run: ./gradlew checkstyleMain checkstyleTest
+
+      - name: Build and analyze
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+        run: ./gradlew build jacocoTestReport sonar --info
+
+      - name: Publish test report
+        if: always()
+        uses: mikepenz/action-junit-report@v4
+        with:
+          report_paths: '**/build/test-results/test/TEST-*.xml'
\ No newline at end of file
diff --git a/.github/workflows/on_push_master.yml b/.github/workflows/on_push_master.yml
index c77415ce..f30d2152 100644
--- a/.github/workflows/on_push_master.yml
+++ b/.github/workflows/on_push_master.yml
@@ -39,6 +39,9 @@ jobs:
           key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
           restore-keys: ${{ runner.os }}-gradle
 
+      - name: Check Style
+        run: ./gradlew checkstyleMain checkstyleTest
+
       - name: Build and analyze
         id: build_jar
         env:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6ba2b42d..e83638b9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,30 +2,43 @@
 Welcome and thank you for considering contributing to Ns4kafka!
 
-By following these guidelines, you can help make the contribution process easy and effective for everyone involved. It also shows that you agree to respect the time of the developers managing and developing these open source projects. In return, we will reciprocate that respect by addressing your issue, assessing changes, and helping you finalize your pull requests.
+By following these guidelines, you can help make the contribution process easy and effective for everyone involved. It
+also shows that you agree to respect the time of the developers managing and developing these open source projects. In
+return, we will reciprocate that respect by addressing your issue, assessing changes, and helping you finalize your pull
+requests.
 
 ## Getting Started
 
 ### Issues
 
-Issues should be used to report problems, request a new feature, or to discuss potential changes before a PR is created. When you create a new Issue, a template will be loaded that will guide you through collecting and providing the information we need to investigate.
+Issues should be used to report problems, request a new feature, or to discuss potential changes before a PR is created.
+When you create a new Issue, a template will be loaded that will guide you through collecting and providing the
+information we need to investigate.
 
-If you find an existing issue that addresses the problem you're having, please add your own reproduction information to the existing issue instead of creating a new one. Adding a [reaction](https://github.blog/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) can also indicate to our maintainers that a particular problem is affecting more than just the reporter.
+If you find an existing issue that addresses the problem you're having, please add your own reproduction information to
+the existing issue instead of creating a new one. Adding
+a [reaction](https://github.blog/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) can also indicate to
+our maintainers that a particular problem is affecting more than just the reporter.
 
-If you're unable to find an open issue addressing the problem, open a new one. Be sure to include a title and a clear description, relevant information, and a code sample or executable test case demonstrating the expected behavior that is not occurring.
+If you're unable to find an open issue addressing the problem, open a new one. Be sure to include a title and a clear
+description, relevant information, and a code sample or executable test case demonstrating the expected behavior that is
+not occurring.
 
 ### Pull Requests
 
-PRs are always welcome and can be a quick way to get your fix or improvement slated for the next release. In general, PRs should:
+PRs are always welcome and can be a quick way to get your fix or improvement slated for the next release. In general,
+PRs should:
 
 - Only fix/add the functionality in question OR address wide-spread style issues, not both.
 - Add unit or integration tests for fixed or changed functionality (if a test suite already exists).
 - Address a single concern in the least number of changed lines as possible.
 - Be accompanied by a complete Pull Request template (loaded automatically when a PR is created).
 
-Be sure to use the past tense ("Added new feature...", "Fixed bug on...") and add tags to the PR ("documentation" for documentation updates, "bug" for bug fixing, etc.).
+Be sure to use the past tense ("Added new feature...", "Fixed bug on...") and add tags to the PR ("documentation" for
+documentation updates, "bug" for bug fixing, etc.).
 
-For changes that address core functionality or would require breaking changes (e.g. a major release), it's best to open an Issue to discuss your proposal first. This is not required but can save time creating and reviewing changes.
+For changes that address core functionality or would require breaking changes (e.g. a major release), it's best to open
+an Issue to discuss your proposal first. This is not required but can save time creating and reviewing changes.
 
 In general, we follow the ["fork-and-pull" Git workflow](https://github.com/susam/gitpr)
 
@@ -37,14 +50,22 @@ In general, we follow the ["fork-and-pull" Git workflow](https://github.com/susa
 - Push changes to your fork
 - Open a PR in our repository targeting master and follow the PR template so that we can efficiently review the changes.
 
-## Styleguides
+## Style Guide
 
-### Git Commit Messages
+### Code Style
 
-When contributing to the project, it's important to follow a consistent style for Git commit messages. Here are some guidelines to keep in mind:
+We maintain a consistent code style using [Checkstyle](https://checkstyle.sourceforge.io/).
 
-- Use the present tense, such as "Add feature," rather than the past tense, such as "Added feature."
-- Use the imperative mood, such as "Move cursor to..." rather than "Moves cursor to..."
-- Limit the first line of the commit message to 72 characters or less.
-- Use references to issues and pull requests after the first line as needed.
-- If your commit only changes documentation, include `[ci skip]` in the commit title.
+The configuration file is defined in the `.checkstyle` folder.
+To perform Checkstyle validation, run the following:
+
+```bash
+./gradlew checkstyleMain checkstyleTest
+```
+
+Before you start contributing new code, it is recommended to:
+
+- Install the IntelliJ [CheckStyle-IDEA](https://plugins.jetbrains.com/plugin/1065-checkstyle-idea) plugin.
+- Configure the plugin to use Ns4Kafka's Checkstyle configuration file.
+
+Adhering to this code style ensures consistency and helps maintain code quality throughout the project.
\ No newline at end of file
diff --git a/README.md b/README.md
index 6b1e2a8c..4cdb9214 100644
--- a/README.md
+++ b/README.md
@@ -8,52 +8,62 @@
 [![Docker Pulls](https://img.shields.io/docker/pulls/michelin/ns4kafka?label=Pulls&logo=docker&style=for-the-badge)](https://hub.docker.com/r/michelin/ns4kafka/tags)
 [![Docker Stars](https://img.shields.io/docker/stars/michelin/ns4kafka?label=Stars&logo=docker&style=for-the-badge)](https://hub.docker.com/r/michelin/ns4kafka)
 [![SonarCloud Coverage](https://img.shields.io/sonar/coverage/michelin_ns4kafka?logo=sonarcloud&server=https%3A%2F%2Fsonarcloud.io&style=for-the-badge)](https://sonarcloud.io/component_measures?id=michelin_ns4kafka&metric=coverage&view=list)
+[![SonarCloud Tests](https://img.shields.io/sonar/tests/michelin_ns4kafka/master?server=https%3A%2F%2Fsonarcloud.io&style=for-the-badge&logo=sonarcloud)](https://sonarcloud.io/component_measures?metric=tests&view=list&id=michelin_ns4kafka)
 [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg?logo=apache&style=for-the-badge)](https://opensource.org/licenses/Apache-2.0)
 
-Ns4Kafka introduces namespace functionality to Apache Kafka, as well as a new deployment model for Kafka resources using [Kafkactl](https://github.com/michelin/kafkactl), which follows best practices from Kubernetes.
+Ns4Kafka introduces namespace functionality to Apache Kafka, as well as a new deployment model for Kafka resources
+using [Kafkactl](https://github.com/michelin/kafkactl), which follows best practices from Kubernetes.
 
 ## Table of Contents
 
 * [Principles](#principles)
-  * [Namespace Isolation](#namespace-isolation)
-  * [Desired State](#desired-state)
-  * [Server Side Validation](#server-side-validation)
-  * [CLI](#cli)
+    * [Namespace Isolation](#namespace-isolation)
+    * [Desired State](#desired-state)
+    * [Server Side Validation](#server-side-validation)
+    * [CLI](#cli)
 * [Download](#download)
 * [Install](#install)
 * [Demo Environment](#demo-environment)
 * [Configuration](#configuration)
-  * [GitLab Authentication](#gitlab-authentication)
-  * [Admin Account](#admin-account)
-  * [Kafka Broker Authentication](#kafka-broker-authentication)
-  * [Managed clusters](#managed-clusters)
-  * [AKHQ](#akhq)
+    * [GitLab Authentication](#gitlab-authentication)
+    * [Admin Account](#admin-account)
+    * [Kafka Broker Authentication](#kafka-broker-authentication)
+    * [Managed clusters](#managed-clusters)
+    * [AKHQ](#akhq)
 * [Administration](#administration)
 * [Contribution](#contribution)
 
 ## Principles
 
-Ns4Kafka is an API that provides controllers for listing, creating, and deleting various Kafka resources, including topics, connectors, schemas, and Kafka Connect clusters. The solution is built on several principles.
+Ns4Kafka is an API that provides controllers for listing, creating, and deleting various Kafka resources, including
+topics, connectors, schemas, and Kafka Connect clusters. The solution is built on several principles.
 
 ### Namespace Isolation
 
-Ns4Kafka implements the concept of namespaces, which enable encapsulation of Kafka resources within specific namespaces. Each namespace can only view and manage the resources that belong to it, with other namespaces being isolated from each other. This isolation is achieved by assigning ownership of names and prefixes to specific namespaces.
+Ns4Kafka implements the concept of namespaces, which enable encapsulation of Kafka resources within specific namespaces.
+Each namespace can only view and manage the resources that belong to it, with other namespaces being isolated from each
+other. This isolation is achieved by assigning ownership of names and prefixes to specific namespaces.
 
 ### Desired State
 
-Whenever you deploy a Kafka resource using Ns4Kafka, the solution saves it to a dedicated topic and synchronizes the Kafka cluster to ensure that the resource's desired state is achieved.
+Whenever you deploy a Kafka resource using Ns4Kafka, the solution saves it to a dedicated topic and synchronizes the
+Kafka cluster to ensure that the resource's desired state is achieved.
 
 ### Server Side Validation
 
-Ns4Kafka allows you to apply customizable validation rules to ensure that your resources are configured with the appropriate values.
+Ns4Kafka allows you to apply customizable validation rules to ensure that your resources are configured with the
+appropriate values.
 
 ### CLI
 
-Ns4Kafka includes [Kafkactl](https://github.com/michelin/kafkactl), a command-line interface (CLI) that enables you to deploy your Kafka resources 'as code' within your namespace using YAML descriptors. This tool can also be used in continuous integration/continuous delivery (CI/CD) pipelines.
+Ns4Kafka includes [Kafkactl](https://github.com/michelin/kafkactl), a command-line interface (CLI) that enables you to
+deploy your Kafka resources 'as code' within your namespace using YAML descriptors. This tool can also be used in
+continuous integration/continuous delivery (CI/CD) pipelines.
 
 ## Download
 
-You can download Ns4Kafka as a fat jar from the project's releases page on GitHub at https://github.com/michelin/ns4kafka/releases.
+You can download Ns4Kafka as a fat jar from the project's releases page on GitHub
+at https://github.com/michelin/ns4kafka/releases.
 
 Additionally, a Docker image of the solution is available at https://hub.docker.com/repository/docker/michelin/ns4kafka.
@@ -61,15 +71,18 @@ Additionally, a Docker image of the solution is available at https://hub.docker.
 
 To operate, Ns4Kafka requires a Kafka broker for data storage and GitLab for user authentication.
 
-The solution is built on the [Micronaut framework](https://micronaut.io/) and can be configured with any [Micronaut property source loader](https://docs.micronaut.io/1.3.0.M1/guide/index.html#_included_propertysource_loaders).
+The solution is built on the [Micronaut framework](https://micronaut.io/) and can be configured with
+any [Micronaut property source loader](https://docs.micronaut.io/1.3.0.M1/guide/index.html#_included_propertysource_loaders).
 
-To override the default properties from the `application.yml` file, you can set the `micronaut.config.file` system property when running the fat jar file, like so:
+To override the default properties from the `application.yml` file, you can set the `micronaut.config.file` system
+property when running the fat jar file, like so:
 
 ```console
 java -Dmicronaut.config.file=application.yml -jar ns4kafka.jar
 ```
 
-Alternatively, you can set the `MICRONAUT_CONFIG_FILE` environment variable and then run the jar file without additional parameters, as shown below:
+Alternatively, you can set the `MICRONAUT_CONFIG_FILE` environment variable and then run the jar file without additional
+parameters, as shown below:
 
 ```console
 MICRONAUT_CONFIG_FILE=application.yml
@@ -85,6 +98,7 @@
 docker-compose up -d
 ```
 
 This command will start multiple containers, including:
+
 - 1 Zookeeper
 - 1 Kafka broker
 - 1 Schema registry
@@ -96,11 +110,16 @@ This command will start multiple containers, including:
 
 Please note that SASL/SCRAM authentication and authorization using ACLs are enabled on the broker.
 
 To get started, you'll need to perform the following steps:
-1. Define a GitLab admin group for Ns4kafka in the `application.yml` file. You can find an example [here](#admin-account). It is recommended to choose a GitLab group you belong to in order to have admin rights.
-2. Define a GitLab token for Kafkactl in the `config.yml` file. You can refer to the installation instructions [here](https://github.com/michelin/kafkactl#install).
-3. Define a GitLab group you belong to in the role bindings of the `resources/admin/namespace.yml` file. This is demonstrated in the example [here](https://github.com/michelin/kafkactl#role-binding).
 
-## Configuration
+1. Define a GitLab admin group for Ns4Kafka in the `application.yml` file. You can find an
+   example [here](#admin-account). It is recommended to choose a GitLab group you belong to in order to have admin
+   rights.
+2. Define a GitLab token for Kafkactl in the `config.yml` file. You can refer to the installation
+   instructions [here](https://github.com/michelin/kafkactl#install).
+3. Define a GitLab group you belong to in the role bindings of the `resources/admin/namespace.yml` file. This is
+   demonstrated in the example [here](https://github.com/michelin/kafkactl#role-binding).
+
+## Configuration
 
 ### GitLab Authentication
 
@@ -131,7 +150,8 @@ ns4kafka:
     admin-group: "MY_ADMIN_GROUP"
 ```
 
-If the admin group is set to "MY_ADMIN_GROUP", users will be granted admin privileges if they belong to the GitLab group "MY_ADMIN_GROUP".
+If the admin group is set to "MY_ADMIN_GROUP", users will be granted admin privileges if they belong to the GitLab
+group "MY_ADMIN_GROUP".
 
 ### Kafka Broker Authentication
 
@@ -179,7 +199,8 @@ ns4kafka:
       basicAuthPassword: "password"
 ```
 
-The name for each managed cluster has to be unique. This is this name you have to set in the field **metadata.cluster** of your namespace descriptors.
+The name for each managed cluster has to be unique. This is the name you have to set in the field **metadata.cluster**
+of your namespace descriptors.
 
 | Property                                 | type    | description                                                  |
 |------------------------------------------|---------|--------------------------------------------------------------|
@@ -201,9 +222,11 @@ The configuration will depend on the authentication method selected for your bro
 
 ### AKHQ
 
-[AKHQ](https://github.com/tchiotludo/akhq) can be integrated with Ns4kafka to provide access to resources within your namespace during the authentication process.
+[AKHQ](https://github.com/tchiotludo/akhq) can be integrated with Ns4kafka to provide access to resources within your
+namespace during the authentication process.
 
 To enable this integration, follow these steps:
+
 1. Configure LDAP authentication in AKHQ.
 2. Add the Ns4Kafka claim endpoint to AKHQ's configuration:
@@ -219,7 +242,9 @@
 For AKHQ versions from v0.20 to v0.24, use the `/akhq-claim/v2` endpoint.
 For AKHQ versions prior to v0.20, use the `/akhq-claim/v1` endpoint.
 
 3. In your Ns4Kafka configuration, specify the following settings for AKHQ:
+
 * For AKHQ versions v0.25 and later
+
 ```yaml
 ns4kafka:
   akhq:
@@ -239,6 +264,7 @@ ns4kafka:
 ```
 
 * For AKHQ versions prior to v0.25
+
 ```yaml
 ns4kafka:
   akhq:
@@ -276,12 +302,17 @@ metadata:
     support-group: NAMESPACE-LDAP-GROUP
 ```
 
-Once the configuration is in place, after successful authentication in AKHQ, users belonging to the `NAMESPACE-LDAP-GROUP` will be able to access the resources within the `myNamespace` namespace.
+Once the configuration is in place, after successful authentication in AKHQ, users belonging to
+the `NAMESPACE-LDAP-GROUP` will be able to access the resources within the `myNamespace` namespace.
 
 ## Administration
 
-The setup of namespaces, owner ACLs, role bindings, and quotas is the responsibility of Ns4Kafka administrators, as these resources define the context in which project teams will work. To create your first namespace, please refer to the [Kafkactl documentation](https://github.com/michelin/kafkactl/blob/main/README.md#administrator).
+The setup of namespaces, owner ACLs, role bindings, and quotas is the responsibility of Ns4Kafka administrators, as
+these resources define the context in which project teams will work. To create your first namespace, please refer to
+the [Kafkactl documentation](https://github.com/michelin/kafkactl/blob/main/README.md#administrator).
 
 ## Contribution
-
-We welcome contributions from the community! Before you get started, please take a look at our [contribution guide](https://github.com/michelin/ns4kafka/blob/master/CONTRIBUTING.md) to learn about our guidelines and best practices. We appreciate your help in making Ns4Kafka a better tool for everyone.
+
+We welcome contributions from the community! Before you get started, please take a look at
+our [contribution guide](https://github.com/michelin/ns4kafka/blob/master/CONTRIBUTING.md) to learn about our guidelines
+and best practices. We appreciate your help in making Ns4Kafka a better tool for everyone.
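Since the README refers to `metadata.cluster` without showing a complete descriptor, here is a minimal, hedged sketch of what a namespace descriptor can look like. Only `metadata.cluster` and the `support-group` label are taken directly from the README above; the remaining field names follow the Kafkactl examples it links to, and `myNamespace`, `local`, and `user1` are placeholder values:

```yaml
# Minimal namespace descriptor sketch (illustrative values only)
apiVersion: v1
kind: Namespace
metadata:
  name: myNamespace
  # Must match the unique name of one of the managed clusters
  # declared in the Ns4Kafka configuration
  cluster: local
  labels:
    # Label consumed by the AKHQ claim endpoints described above
    support-group: NAMESPACE-LDAP-GROUP
spec:
  # Kafka user the namespace's ACLs are granted to (hypothetical value)
  kafkaUser: user1
```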
diff --git a/build.gradle b/build.gradle
index 1b30300f..6716ebcb 100644
--- a/build.gradle
+++ b/build.gradle
@@ -4,6 +4,7 @@ plugins {
     id("jacoco")
     id("org.sonarqube") version "4.3.1.3277"
     id("pl.allegro.tech.build.axion-release") version "1.15.4"
+    id("checkstyle")
 }
 
 version = scmVersion.version
@@ -87,25 +88,25 @@ dockerfile {
 
 if (project.hasProperty("releaseLatest")) {
     dockerBuild {
-        images = ["michelin/ns4kafka:" + version, "michelin/ns4kafka:latest"]
+        images.set(["michelin/ns4kafka:" + version, "michelin/ns4kafka:latest"])
     }
 } else {
     dockerBuild {
-        images = ["michelin/ns4kafka:" + version]
+        images.set(["michelin/ns4kafka:" + version])
     }
 }
 
-tasks.withType(JavaCompile) {
+tasks.withType(JavaCompile).configureEach {
     options.fork = true
     options.forkOptions.jvmArgs << '-Dmicronaut.openapi.views.spec=rapidoc.enabled=true'
 }
 
 sonarqube {
-  properties {
-    property "sonar.projectKey", "michelin_ns4kafka"
-    property "sonar.organization", "michelin"
-    property "sonar.host.url", "https://sonarcloud.io"
-  }
+    properties {
+        property "sonar.projectKey", "michelin_ns4kafka"
+        property "sonar.organization", "michelin"
+        property "sonar.host.url", "https://sonarcloud.io"
+    }
 }
 
 jacocoTestReport {
@@ -122,3 +123,11 @@ test {
     }
 }
 
+checkstyle {
+    toolVersion = '10.12.3'
+    configFile = file(".checkstyle/checkstyle.xml")
+    ignoreFailures = false
+    maxErrors = 0
+    maxWarnings = 0
+}
+
diff --git a/micronaut-cli.yml b/micronaut-cli.yml
index 410d6888..f91ac003 100644
--- a/micronaut-cli.yml
+++ b/micronaut-cli.yml
@@ -3,4 +3,4 @@ defaultPackage: com.michelin.ns4kafka
 testFramework: junit
 sourceLanguage: java
 buildTool: gradle
-features: [annotation-api, app-name, gradle, http-client, jackson-databind, java, java-application, junit, kafka, logback, lombok, micronaut-aot, micronaut-build, micronaut-http-validation, mockito, netty-server, openapi, reactor, reactor-http-client, readme, security, security-annotations, security-jwt, security-ldap, shade, testcontainers, validation]
+features: [ annotation-api, app-name, gradle, http-client, jackson-databind, java, java-application, junit, kafka, logback, management, lombok, micronaut-aot, micronaut-build, micronaut-http-validation, mockito, netty-server, openapi, reactor, reactor-http-client, readme, security, security-annotations, security-jwt, security-ldap, shade, testcontainers, validation ]
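The `management` feature newly listed in `micronaut-cli.yml` corresponds to Micronaut's management endpoints. As a hedged sketch (these are standard Micronaut property names, not settings shipped in this PR), individual endpoints are toggled in `application.yml` like so:

```yaml
# Sketch only: standard Micronaut management-endpoint toggles (not part of this PR)
endpoints:
  health:
    enabled: true     # serve GET /health
    sensitive: false  # no authentication required
  loggers:
    enabled: true
    sensitive: true   # require an authenticated caller
```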
diff --git a/src/main/java/com/michelin/ns4kafka/Application.java b/src/main/java/com/michelin/ns4kafka/Application.java
index d95546d4..182a61ec 100644
--- a/src/main/java/com/michelin/ns4kafka/Application.java
+++ b/src/main/java/com/michelin/ns4kafka/Application.java
@@ -9,21 +9,24 @@
 import io.swagger.v3.oas.annotations.security.SecurityScheme;
 import io.swagger.v3.oas.annotations.tags.Tag;
 
+/**
+ * Main class to start the application.
+ */
 @SecurityScheme(name = "JWT",
-        type = SecuritySchemeType.HTTP,
-        scheme = "bearer",
-        bearerFormat = "JWT")
+    type = SecuritySchemeType.HTTP,
+    scheme = "bearer",
+    bearerFormat = "JWT")
 @OpenAPIDefinition(
-        security = @SecurityRequirement(name = "JWT"),
-        info = @Info(
-                title = "Ns4Kafka",
-                version = "0.1",
-                description = "Getting started with REST APIs."
-        )
+    security = @SecurityRequirement(name = "JWT"),
+    info = @Info(
+        title = "Ns4Kafka",
+        version = "0.1",
+        description = "Getting started with REST APIs."
+    )
 )
 @OpenAPIInclude(
-        classes = { io.micronaut.security.endpoints.LoginController.class },
-        tags = @Tag(name = "_Security", description = "All the login endpoints.")
+    classes = {io.micronaut.security.endpoints.LoginController.class},
+    tags = @Tag(name = "_Security", description = "All the login endpoints.")
 )
 public class Application {
 
diff --git a/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java b/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java
index 84ac9b81..0371a864 100644
--- a/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java
+++ b/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java
@@ -1,8 +1,8 @@
 package com.michelin.ns4kafka.controllers;
 
-import com.michelin.ns4kafka.config.AkhqClaimProviderControllerConfig;
-import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig;
 import com.michelin.ns4kafka.models.AccessControlEntry;
+import com.michelin.ns4kafka.properties.AkhqProperties;
+import com.michelin.ns4kafka.properties.ManagedClusterProperties;
 import com.michelin.ns4kafka.services.AccessControlEntryService;
 import com.michelin.ns4kafka.services.NamespaceService;
 import io.micronaut.core.annotation.Introspected;
@@ -11,17 +11,25 @@
 import io.micronaut.http.annotation.Post;
 import io.micronaut.security.rules.SecurityRule;
 import io.swagger.v3.oas.annotations.tags.Tag;
-import jakarta.inject.Inject;
-import lombok.Builder;
-import lombok.Data;
-import lombok.Getter;
-
 import jakarta.annotation.security.RolesAllowed;
+import jakarta.inject.Inject;
 import jakarta.validation.Valid;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
+import lombok.Builder;
+import lombok.Data;
+import lombok.Getter;
 
+/**
+ * Controller to manage AKHQ claims.
+ */
 @Tag(name = "AKHQ", description = "Manage the AKHQ endpoints.")
 @RolesAllowed(SecurityRule.IS_ANONYMOUS)
 @Controller("/akhq-claim")
@@ -31,7 +39,7 @@ public class AkhqClaimProviderController {
     private static final List<String> ADMIN_REGEXP = List.of(".*");
 
     @Inject
-    AkhqClaimProviderControllerConfig config;
+    AkhqProperties config;
 
     @Inject
     AccessControlEntryService accessControlEntryService;
@@ -40,104 +48,110 @@ public class AkhqClaimProviderController {
     NamespaceService namespaceService;
 
     @Inject
-    List<KafkaAsyncExecutorConfig> managedClusters;
+    List<ManagedClusterProperties> managedClusters;
 
     /**
-     * List AKHQ claims (v019 and prior)
+     * List AKHQ claims (v019 and prior).
+     *
      * @param request The AKHQ request
      * @return The AKHQ claims
     */
     @Post
-    public AKHQClaimResponse generateClaim(@Valid @Body AKHQClaimRequest request) {
+    public AkhqClaimResponse generateClaim(@Valid @Body AkhqClaimRequest request) {
         if (request == null) {
-            return AKHQClaimResponse.ofEmpty(config.getFormerRoles());
+            return AkhqClaimResponse.ofEmpty(config.getFormerRoles());
         }
 
         final List<String> groups = Optional.ofNullable(request.getGroups()).orElse(new ArrayList<>());
 
         if (groups.contains(config.getAdminGroup())) {
-            return AKHQClaimResponse.ofAdmin(config.getFormerAdminRoles());
+            return AkhqClaimResponse.ofAdmin(config.getFormerAdminRoles());
         }
 
-        List<AccessControlEntry> relatedACL = namespaceService.listAll()
-                .stream()
-                .filter(namespace -> namespace.getMetadata().getLabels() != null &&
-                        groups.contains(namespace.getMetadata().getLabels().getOrDefault(config.getGroupLabel(), "_")))
-                .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace).stream())
-                .collect(Collectors.toList());
+        List<AccessControlEntry> relatedAcl = namespaceService.listAll()
+            .stream()
+            .filter(namespace -> namespace.getMetadata().getLabels() != null
+                && groups.contains(namespace.getMetadata().getLabels().getOrDefault(config.getGroupLabel(), "_")))
+            .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace).stream())
+            .collect(Collectors.toList());
 
         // Add all public ACLs.
-        relatedACL.addAll(accessControlEntryService.findAllPublicGrantedTo());
-
-        return AKHQClaimResponse.builder()
-                .roles(config.getFormerRoles())
-                .attributes(
-                        Map.of(
-                                "topicsFilterRegexp", computeAllowedRegexListForResourceType(relatedACL, AccessControlEntry.ResourceType.TOPIC),
-                                "connectsFilterRegexp", computeAllowedRegexListForResourceType(relatedACL, AccessControlEntry.ResourceType.CONNECT),
-                                "consumerGroupsFilterRegexp", ADMIN_REGEXP
-                        )
+        relatedAcl.addAll(accessControlEntryService.findAllPublicGrantedTo());
+
+        return AkhqClaimResponse.builder()
+            .roles(config.getFormerRoles())
+            .attributes(
+                Map.of(
+                    "topicsFilterRegexp",
+                    computeAllowedRegexListForResourceType(relatedAcl, AccessControlEntry.ResourceType.TOPIC),
+                    "connectsFilterRegexp",
+                    computeAllowedRegexListForResourceType(relatedAcl, AccessControlEntry.ResourceType.CONNECT),
+                    "consumerGroupsFilterRegexp", ADMIN_REGEXP
                 )
-                .build();
+            )
+            .build();
     }
 
     /**
-     * List AKHQ claims (v020 to 024)
+     * List AKHQ claims (v020 to 024).
+     *
      * @param request The AKHQ request
      * @return The AKHQ claims
      */
     @Post("/v2")
-    public AKHQClaimResponseV2 generateClaimV2(@Valid @Body AKHQClaimRequest request) {
+    public AkhqClaimResponseV2 generateClaimV2(@Valid @Body AkhqClaimRequest request) {
         if (request == null) {
-            return AKHQClaimResponseV2.ofEmpty(config.getFormerRoles());
+            return AkhqClaimResponseV2.ofEmpty(config.getFormerRoles());
         }
 
         final List<String> groups = Optional.ofNullable(request.getGroups()).orElse(new ArrayList<>());
 
         if (groups.contains(config.getAdminGroup())) {
-            return AKHQClaimResponseV2.ofAdmin(config.getFormerAdminRoles());
+            return AkhqClaimResponseV2.ofAdmin(config.getFormerAdminRoles());
         }
 
-        List<AccessControlEntry> relatedACL = getAllAclForGroups(groups);
+        List<AccessControlEntry> relatedAcl = getAllAclForGroups(groups);
 
         // Add all public ACLs.
-        relatedACL.addAll(accessControlEntryService.findAllPublicGrantedTo());
-
-        return AKHQClaimResponseV2.builder()
-                .roles(config.getFormerRoles())
-                .topicsFilterRegexp(computeAllowedRegexListForResourceType(relatedACL, AccessControlEntry.ResourceType.TOPIC))
-                .connectsFilterRegexp(computeAllowedRegexListForResourceType(relatedACL, AccessControlEntry.ResourceType.CONNECT))
-                .consumerGroupsFilterRegexp(ADMIN_REGEXP)
-                .build();
+        relatedAcl.addAll(accessControlEntryService.findAllPublicGrantedTo());
+
+        return AkhqClaimResponseV2.builder()
+            .roles(config.getFormerRoles())
+            .topicsFilterRegexp(
+                computeAllowedRegexListForResourceType(relatedAcl, AccessControlEntry.ResourceType.TOPIC))
+            .connectsFilterRegexp(
+                computeAllowedRegexListForResourceType(relatedAcl, AccessControlEntry.ResourceType.CONNECT))
+            .consumerGroupsFilterRegexp(ADMIN_REGEXP)
+            .build();
     }
 
     /**
-     * List AKHQ claims (v025 and higher)
+     * List AKHQ claims (v025 and higher).
      *
      * @param request The AKHQ request
      * @return The AKHQ claims
      */
     @Post("/v3")
-    public AKHQClaimResponseV3 generateClaimV3(@Valid @Body AKHQClaimRequest request) {
+    public AkhqClaimResponseV3 generateClaimV3(@Valid @Body AkhqClaimRequest request) {
         final List<String> groups = Optional.ofNullable(request.getGroups()).orElse(new ArrayList<>());
 
         if (groups.contains(config.getAdminGroup())) {
-            return AKHQClaimResponseV3.ofAdmin(config.getAdminRoles());
+            return AkhqClaimResponseV3.ofAdmin(config.getAdminRoles());
         }
 
-        List<AccessControlEntry> relatedACL = getAllAclForGroups(groups);
+        List<AccessControlEntry> relatedAcl = getAllAclForGroups(groups);
 
         // Add all public ACLs
-        relatedACL.addAll(accessControlEntryService.findAllPublicGrantedTo());
+        relatedAcl.addAll(accessControlEntryService.findAllPublicGrantedTo());
 
         // Remove unnecessary ACLs (project.topic1 when project.* is granted on the same resource type and cluster)
-        optimizeACL(relatedACL);
+        optimizeAcl(relatedAcl);
 
-        Map<String, AKHQClaimResponseV3.Group> bindings = new LinkedHashMap<>();
+        Map<String, AkhqClaimResponseV3.Group> bindings = new LinkedHashMap<>();
 
         // Start by creating a map that store permissions by role/cluster
-        relatedACL.forEach(acl -> {
+        relatedAcl.forEach(acl -> {
             String escapedString = Pattern.quote(acl.getSpec().getResource());
             String patternRegex;
@@ -163,29 +177,51 @@ public AKHQClaimResponseV3 generateClaimV3(@Valid @Body AKHQClaimRequest request
                 clusters.add(patternCluster);
 
                 // Otherwise we add a new one
-                bindings.put(key, AKHQClaimResponseV3.Group.builder()
-                        .role(role)
-                        .patterns(regexes)
-                        .clusters(clusters)
-                        .build());
+                bindings.put(key, AkhqClaimResponseV3.Group.builder()
+                    .role(role)
+                    .patterns(regexes)
+                    .clusters(clusters)
+                    .build());
             }
         });
 
-        List<AKHQClaimResponseV3.Group> result = optimizeV3Claim(bindings);
+        List<AkhqClaimResponseV3.Group> result = optimizeV3Claim(bindings);
 
         // Add the same pattern and cluster filtering for SCHEMA as the TOPIC ones
         result.addAll(result.stream()
-                .filter(g -> g.role.equals(config.getRoles().get(AccessControlEntry.ResourceType.TOPIC)))
-                .map(g -> AKHQClaimResponseV3.Group.builder()
-                        .role(config.getRoles().get(AccessControlEntry.ResourceType.SCHEMA))
-                        .patterns(g.getPatterns())
-                        .clusters(g.getClusters())
-                        .build()
-                ).toList());
-
-        return AKHQClaimResponseV3.builder()
-                .groups(result.isEmpty() ? null : Map.of("group", result))
-                .build();
+            .filter(g -> g.role.equals(config.getRoles().get(AccessControlEntry.ResourceType.TOPIC)))
+            .map(g -> AkhqClaimResponseV3.Group.builder()
+                .role(config.getRoles().get(AccessControlEntry.ResourceType.SCHEMA))
+                .patterns(g.getPatterns())
+                .clusters(g.getClusters())
+                .build()
+            ).toList());
+
+        return AkhqClaimResponseV3.builder()
+            .groups(result.isEmpty() ? null : Map.of("group", result))
+            .build();
+    }
+
+    /**
+     * Remove ACL that are already included by another ACL on the same resource and cluster
+     * Ex: LITERAL ACL1 with project.topic1 resource + PREFIXED ACL2 with project -> return ACL2 only
+     *
+     * @param acl the input list of acl to optimize
+     */
+    private void optimizeAcl(List<AccessControlEntry> acl) {
+        acl.removeIf(accessControlEntry -> acl.stream()
+            // Keep PREFIXED ACL with a different resource but same resource type and cluster
+            .filter(accessControlEntryOther -> accessControlEntryOther.getSpec().getResourcePatternType()
+                .equals(AccessControlEntry.ResourcePatternType.PREFIXED)
+                &&
+                !accessControlEntryOther.getSpec().getResource().equals(accessControlEntry.getSpec().getResource())
+                && accessControlEntryOther.getSpec().getResourceType()
+                .equals(accessControlEntry.getSpec().getResourceType())
+                && accessControlEntryOther.getMetadata().getCluster()
+                .equals(accessControlEntry.getMetadata().getCluster()))
+            .map(accessControlEntryOther -> accessControlEntryOther.getSpec().getResource())
+            // Remove the ACL if there is one that contains the current resource
+            .anyMatch(escapedString -> accessControlEntry.getSpec().getResource().startsWith(escapedString)));
     }
 
     /**
@@ -194,8 +230,8 @@ public AKHQClaimResponseV3 generateClaimV3(@Valid @Body AKHQClaimRequest request
      * @param bindings - the raw claim
      * @return an optimized claim
      */
-    private List<AKHQClaimResponseV3.Group> optimizeV3Claim(Map<String, AKHQClaimResponseV3.Group> bindings) {
-        List<AKHQClaimResponseV3.Group> result = new ArrayList<>();
+    private List<AkhqClaimResponseV3.Group> optimizeV3Claim(Map<String, AkhqClaimResponseV3.Group> bindings) {
+        List<AkhqClaimResponseV3.Group> result = new ArrayList<>();
 
         // Extract the clusters name from the managedClusters configuration
         List<String> clusters = managedClusters.stream().map(c -> String.format("^%s$", c.getName())).toList();
 
@@ -208,173 +244,201 @@ private List<AKHQClaimResponseV3.Group> optimizeV3Claim(Map<String, AKHQClaimRes
         bindings.forEach((key, value) -> result.stream()
-                // Search bindings with the same role and cluster filtering
-                .filter(r -> r.role.equals(value.role) && r.clusters.size() == value.clusters.size()
-                        && new HashSet<>(r.clusters).containsAll(value.clusters)
-                        && new HashSet<>(value.clusters).containsAll(r.clusters))
-                .findFirst()
-                .ifPresentOrElse(
-                        // If there is any we can merge the patterns and keep only 1 binding
-                        toMerge -> toMerge.patterns.addAll(value.getPatterns()),
-                        // Otherwise we add the current binding
-                        () -> result.add(value)
-                ));
+            // Search bindings with the same role and cluster filtering
+            .filter(r -> r.role.equals(value.role) && r.clusters.size() == value.clusters.size()
+                && new HashSet<>(r.clusters).containsAll(value.clusters)
+                && new HashSet<>(value.clusters).containsAll(r.clusters))
+            .findFirst()
+            .ifPresentOrElse(
+                // If there is any we can merge the patterns and keep only 1 binding
+                toMerge -> toMerge.patterns.addAll(value.getPatterns()),
+                // Otherwise we add the current binding
+                () -> result.add(value)
+            ));
 
         return result;
     }
 
     /**
-     * List all the ACL for a user based on its LDAP groups
+     * List all the ACL for a user based on its LDAP groups.
+     *
      * @param groups the user LDAP groups
      * @return the user's ACL
     */
     private List<AccessControlEntry> getAllAclForGroups(List<String> groups) {
         return namespaceService.listAll()
-                .stream()
-                .filter(namespace -> namespace.getMetadata().getLabels() != null &&
-                        // Split by comma the groupLabel to support multiple groups and compare with user groups
-                        !Collections.disjoint(groups,
-                                List.of(namespace.getMetadata().getLabels()
-                                        .getOrDefault(config.getGroupLabel(), "_")
-                                        .split(","))))
-                .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace).stream())
-                .collect(Collectors.toList());
+            .stream()
+            .filter(namespace -> namespace.getMetadata().getLabels() != null
+                // Split by comma the groupLabel to support multiple groups and compare with user groups
+                && !Collections.disjoint(groups, List.of(namespace.getMetadata().getLabels()
+                .getOrDefault(config.getGroupLabel(), "_")
+                .split(","))))
+            .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace).stream())
+            .collect(Collectors.toList());
     }
 
     /**
-     * Compute AKHQ regexes from given ACLs
-     * @param acls The ACLs
+     * Compute AKHQ regexes from given ACLs.
+     *
+     * @param acls         The ACLs
      * @param resourceType The resource type
      * @return A list of regex
     */
-    public List<String> computeAllowedRegexListForResourceType(List<AccessControlEntry> acls, AccessControlEntry.ResourceType resourceType) {
+    public List<String> computeAllowedRegexListForResourceType(List<AccessControlEntry> acls,
+                                                               AccessControlEntry.ResourceType resourceType) {
         List<String> allowedRegex = acls.stream()
-                .filter(accessControlEntry -> accessControlEntry.getSpec().getResourceType() == resourceType)
-                .filter(accessControlEntry ->
-                        acls.stream()
-                                .filter(accessControlEntryOther -> !accessControlEntryOther.getSpec().getResource().equals(accessControlEntry.getSpec().getResource()))
-                                .map(accessControlEntryOther -> accessControlEntryOther.getSpec().getResource())
-                                .noneMatch(escapedString -> accessControlEntry.getSpec().getResource().startsWith(escapedString)))
-                .map(accessControlEntry -> {
-                    String escapedString = Pattern.quote(accessControlEntry.getSpec().getResource());
-                    if (accessControlEntry.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.PREFIXED) {
-                        return String.format("^%s.*$", escapedString);
-                    } else {
-                        return String.format("^%s$", escapedString);
-                    }
-                })
-                .distinct()
-                .toList();
+            .filter(accessControlEntry -> accessControlEntry.getSpec().getResourceType() == resourceType)
+            .filter(accessControlEntry ->
+                acls.stream()
+                    .filter(accessControlEntryOther -> !accessControlEntryOther.getSpec().getResource()
+                        .equals(accessControlEntry.getSpec().getResource()))
+                    .map(accessControlEntryOther -> accessControlEntryOther.getSpec().getResource())
+                    .noneMatch(escapedString -> accessControlEntry.getSpec().getResource().startsWith(escapedString)))
+            .map(accessControlEntry -> {
+                String escapedString = Pattern.quote(accessControlEntry.getSpec().getResource());
+                if (accessControlEntry.getSpec().getResourcePatternType()
+                    == AccessControlEntry.ResourcePatternType.PREFIXED) {
+                    return String.format("^%s.*$", escapedString);
+                } else {
+                    return String.format("^%s$", escapedString);
+                }
+            })
+            .distinct()
+            .toList();
 
         //AKHQ considers empty list as "^.*$" so we must return something
         return !allowedRegex.isEmpty() ? allowedRegex : EMPTY_REGEXP;
     }
 
     /**
-     * Remove ACL that are already included by another ACL on the same resource and cluster
-     * Ex: LITERAL ACL1 with project.topic1 resource + PREFIXED ACL2 with project -> return ACL2 only
-     *
-     * @param acl the input list of acl to optimize
+     * AKHQ request.
      */
-    private static void optimizeACL(List<AccessControlEntry> acl) {
-        acl.removeIf(accessControlEntry -> acl.stream()
-                // Keep PREFIXED ACL with a different resource but same resource type and cluster
-                .filter(accessControlEntryOther ->
-                        accessControlEntryOther.getSpec().getResourcePatternType().equals(AccessControlEntry.ResourcePatternType.PREFIXED)
-                        && !accessControlEntryOther.getSpec().getResource().equals(accessControlEntry.getSpec().getResource())
-                        && accessControlEntryOther.getSpec().getResourceType().equals(accessControlEntry.getSpec().getResourceType())
-                        && accessControlEntryOther.getMetadata().getCluster().equals(accessControlEntry.getMetadata().getCluster()))
-                .map(accessControlEntryOther -> accessControlEntryOther.getSpec().getResource())
-                // Remove the ACL if there is one that contains the current resource
-                .anyMatch(escapedString -> accessControlEntry.getSpec().getResource().startsWith(escapedString)));
-    }
-
     @Introspected
     @Builder
     @Getter
-    public static class AKHQClaimRequest {
+    public static class AkhqClaimRequest {
         String providerType;
         String providerName;
         String username;
         List<String> groups;
     }
 
+    /**
+     * AKHQ response.
+     */
     @Introspected
     @Builder
    @Getter
-    public static class AKHQClaimResponse {
+    public static class AkhqClaimResponse {
        private List<String> roles;
        private Map<String, List<String>> attributes;
 
-        public static AKHQClaimResponse ofEmpty(List<String> roles) {
-            return AKHQClaimResponse.builder()
-                    .roles(roles)
-                    .attributes(Map.of(
-                            //AKHQ considers empty list as "^.*$" so we must return something
-                            "topicsFilterRegexp", EMPTY_REGEXP,
-                            "connectsFilterRegexp", EMPTY_REGEXP,
-                            "consumerGroupsFilterRegexp", EMPTY_REGEXP
-                    ))
-                    .build();
+        /**
+         * Build an empty AKHQ response.
+         *
+         * @param roles the roles
+         * @return the AKHQ response
+         */
+        public static AkhqClaimResponse ofEmpty(List<String> roles) {
+            return AkhqClaimResponse.builder()
+                .roles(roles)
+                .attributes(Map.of(
+                    // AKHQ considers empty list as "^.*$" so we must return something
+                    "topicsFilterRegexp", EMPTY_REGEXP,
+                    "connectsFilterRegexp", EMPTY_REGEXP,
+                    "consumerGroupsFilterRegexp", EMPTY_REGEXP
+                ))
+                .build();
         }
 
-        public static AKHQClaimResponse ofAdmin(List<String> roles) {
-
-            return AKHQClaimResponse.builder()
-                    .roles(roles)
-                    .attributes(Map.of(
-                            //AKHQ considers empty list as "^.*$" so we must return something
-                            "topicsFilterRegexp", ADMIN_REGEXP,
-                            "connectsFilterRegexp", ADMIN_REGEXP,
-                            "consumerGroupsFilterRegexp", ADMIN_REGEXP
-                    ))
-                    .build();
+        /**
+         * Build an AKHQ response for an admin.
+         *
+         * @param roles the roles
+         * @return the AKHQ response
+         */
+        public static AkhqClaimResponse ofAdmin(List<String> roles) {
+            return AkhqClaimResponse.builder()
+                .roles(roles)
+                .attributes(Map.of(
+                    // AKHQ considers empty list as "^.*$" so we must return something
+                    "topicsFilterRegexp", ADMIN_REGEXP,
+                    "connectsFilterRegexp", ADMIN_REGEXP,
+                    "consumerGroupsFilterRegexp", ADMIN_REGEXP
+                ))
+                .build();
         }
     }
 
+    /**
+     * AKHQ response (v2).
+     */
     @Introspected
     @Builder
     @Getter
-    public static class AKHQClaimResponseV2 {
+    public static class AkhqClaimResponseV2 {
         private List<String> roles;
         private List<String> topicsFilterRegexp;
         private List<String> connectsFilterRegexp;
         private List<String> consumerGroupsFilterRegexp;
 
-        public static AKHQClaimResponseV2 ofEmpty(List<String> roles) {
-            return AKHQClaimResponseV2.builder()
-                    .roles(roles)
-                    .topicsFilterRegexp(EMPTY_REGEXP)
-                    .connectsFilterRegexp(EMPTY_REGEXP)
-                    .consumerGroupsFilterRegexp(EMPTY_REGEXP)
-                    .build();
+        /**
+         * Build an empty AKHQ response.
+         *
+         * @param roles the roles
+         * @return the AKHQ response
+         */
+        public static AkhqClaimResponseV2 ofEmpty(List<String> roles) {
+            return AkhqClaimResponseV2.builder()
+                .roles(roles)
+                .topicsFilterRegexp(EMPTY_REGEXP)
+                .connectsFilterRegexp(EMPTY_REGEXP)
+                .consumerGroupsFilterRegexp(EMPTY_REGEXP)
+                .build();
         }
 
-        public static AKHQClaimResponseV2 ofAdmin(List<String> roles) {
-
-            return AKHQClaimResponseV2.builder()
-                    .roles(roles)
-                    .topicsFilterRegexp(ADMIN_REGEXP)
-                    .connectsFilterRegexp(ADMIN_REGEXP)
-                    .consumerGroupsFilterRegexp(ADMIN_REGEXP)
-                    .build();
+        /**
+         * Build an AKHQ response for an admin.
+         *
+         * @param roles the roles
+         * @return the AKHQ response
+         */
+        public static AkhqClaimResponseV2 ofAdmin(List<String> roles) {
+            return AkhqClaimResponseV2.builder()
+                .roles(roles)
+                .topicsFilterRegexp(ADMIN_REGEXP)
+                .connectsFilterRegexp(ADMIN_REGEXP)
+                .consumerGroupsFilterRegexp(ADMIN_REGEXP)
+                .build();
         }
     }
 
+    /**
+     * AKHQ response (v3).
+     */
     @Introspected
     @Builder
     @Getter
-    public static class AKHQClaimResponseV3 {
+    public static class AkhqClaimResponseV3 {
         private Map<String, List<Group>> groups;
 
-        public static AKHQClaimResponseV3 ofAdmin(Map<AccessControlEntry.ResourceType, String> newAdminRoles) {
-            return AKHQClaimResponseV3.builder()
-                    .groups(Map.of("group",
-                            newAdminRoles.values().stream()
-                                    .map(r -> Group.builder().role(r).build()).collect(Collectors.toList())))
-                    .build();
+        /**
+         * Build an AKHQ response for an admin.
+         *
+         * @param newAdminRoles the roles
+         * @return the AKHQ response
+         */
+        public static AkhqClaimResponseV3 ofAdmin(Map<AccessControlEntry.ResourceType, String> newAdminRoles) {
+            return AkhqClaimResponseV3.builder()
+                .groups(Map.of("group",
+                    newAdminRoles.values().stream()
+                        .map(r -> Group.builder().role(r).build()).collect(Collectors.toList())))
+                .build();
         }
 
+        /**
+         * AKHQ group.
+         */
         @Data
         @Builder
         @Introspected
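To make the V3 claim structure above more concrete: `AkhqClaimResponseV3` serializes to a single `groups` map keyed by `"group"`, where each entry carries a role plus the pattern and cluster regexes built with `Pattern.quote`. Below is a hedged sketch of the resulting payload, rendered as YAML for readability; the role names come from the `ns4kafka.akhq` roles configuration, so `topic-read`, `project1.`, and `cluster1` are placeholder values:

```yaml
# Hypothetical shape of a /akhq-claim/v3 response (placeholder role, prefix, and cluster)
groups:
  group:
    - role: topic-read
      patterns: ["^\\Qproject1.\\E.*$"]  # a PREFIXED ACL on "project1."
      clusters: ["^cluster1$"]
```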
diff --git a/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java b/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java
index 68d5aca0..3e6520cd 100644
--- a/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java
+++ b/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java
@@ -10,150 +10,152 @@
 import io.micronaut.security.authentication.Authentication;
 import io.micronaut.security.rules.SecurityRule;
 import io.swagger.v3.oas.annotations.tags.Tag;
+import jakarta.annotation.security.RolesAllowed;
 import jakarta.inject.Inject;
+import java.util.Collection;
+import java.util.List;
 import lombok.Builder;
 import lombok.Getter;
 import lombok.Setter;
-import jakarta.annotation.security.RolesAllowed;
-import java.util.Collection;
-import java.util.List;
-import java.util.stream.Collectors;
-
+/**
+ * Controller to manage API resources.
+ */
 @Tag(name = "Resources", description = "Manage the API resources.")
 @RolesAllowed(SecurityRule.IS_ANONYMOUS)
 @Controller("/api-resources")
 public class ApiResourcesController {
     /**
-     * ACL resource definition
+     * ACL resource definition.
     */
     public static final ResourceDefinition ACL = ResourceDefinition.builder()
-            .kind("AccessControlEntry")
-            .namespaced(true)
-            .synchronizable(false)
-            .path("acls")
-            .names(List.of("acls", "acl", "ac"))
-            .build();
+        .kind("AccessControlEntry")
+        .namespaced(true)
+        .synchronizable(false)
+        .path("acls")
+        .names(List.of("acls", "acl", "ac"))
+        .build();
 
     /**
-     * Connector resource definition
+     * Connector resource definition.
     */
     public static final ResourceDefinition CONNECTOR = ResourceDefinition.builder()
-            .kind("Connector")
-            .namespaced(true)
-            .synchronizable(true)
-            .path("connectors")
-            .names(List.of("connects", "connect", "co"))
-            .build();
+        .kind("Connector")
+        .namespaced(true)
+        .synchronizable(true)
+        .path("connectors")
+        .names(List.of("connects", "connect", "co"))
+        .build();
 
     /**
-     * Kafka Streams resource definition
+     * Kafka Streams resource definition.
     */
     public static final ResourceDefinition KSTREAM = ResourceDefinition.builder()
-            .kind("KafkaStream")
-            .namespaced(true)
-            .synchronizable(false)
-            .path("streams")
-            .names(List.of("streams", "stream", "st"))
-            .build();
+        .kind("KafkaStream")
+        .namespaced(true)
+        .synchronizable(false)
+        .path("streams")
+        .names(List.of("streams", "stream", "st"))
+        .build();
 
     /**
-     * Role binding resource definition
+     * Role binding resource definition.
     */
     public static final ResourceDefinition ROLE_BINDING = ResourceDefinition.builder()
-            .kind("RoleBinding")
-            .namespaced(true)
-            .synchronizable(false)
-            .path("role-bindings")
-            .names(List.of("rolebindings", "rolebinding", "rb"))
-            .build();
+        .kind("RoleBinding")
+        .namespaced(true)
+        .synchronizable(false)
+        .path("role-bindings")
+        .names(List.of("rolebindings", "rolebinding", "rb"))
+        .build();
 
     /**
-     * Topic resource definition
+     * Topic resource definition.
     */
     public static final ResourceDefinition TOPIC = ResourceDefinition.builder()
-            .kind("Topic")
-            .namespaced(true)
-            .synchronizable(true)
-            .path("topics")
-            .names(List.of("topics", "topic", "to"))
-            .build();
+        .kind("Topic")
+        .namespaced(true)
+        .synchronizable(true)
+        .path("topics")
+        .names(List.of("topics", "topic", "to"))
+        .build();
 
     /**
-     * Schema resource definition
+     * Schema resource definition.
     */
     public static final ResourceDefinition SCHEMA = ResourceDefinition.builder()
-            .kind("Schema")
-            .namespaced(true)
-            .synchronizable(false)
-            .path("schemas")
-            .names(List.of("schemas", "schema", "sc"))
-            .build();
+        .kind("Schema")
+        .namespaced(true)
+        .synchronizable(false)
+        .path("schemas")
+        .names(List.of("schemas", "schema", "sc"))
+        .build();
 
     /**
-     * Resource quota resource definition
+     * Resource quota resource definition.
     */
     public static final ResourceDefinition RESOURCE_QUOTA = ResourceDefinition.builder()
-            .kind("ResourceQuota")
-            .namespaced(true)
-            .synchronizable(false)
-            .path("resource-quotas")
-            .names(List.of("resource-quotas", "resource-quota", "quotas", "quota", "qu"))
-            .build();
+        .kind("ResourceQuota")
+        .namespaced(true)
+        .synchronizable(false)
+        .path("resource-quotas")
+        .names(List.of("resource-quotas", "resource-quota", "quotas", "quota", "qu"))
+        .build();
 
     /**
-     * Connect worker resource definition
+     * Connect worker resource definition.
     */
     public static final ResourceDefinition CONNECT_CLUSTER = ResourceDefinition.builder()
-            .kind("ConnectCluster")
-            .namespaced(true)
-            .synchronizable(false)
-            .path("connect-clusters")
-            .names(List.of("connect-clusters", "connect-cluster", "cc"))
-            .build();
+        .kind("ConnectCluster")
+        .namespaced(true)
+        .synchronizable(false)
+        .path("connect-clusters")
+        .names(List.of("connect-clusters", "connect-cluster", "cc"))
+        .build();
 
     /**
-     * Namespace resource definition
+     * Namespace resource definition.
     */
     public static final ResourceDefinition NAMESPACE = ResourceDefinition.builder()
-            .kind("Namespace")
-            .namespaced(false)
-            .synchronizable(false)
-            .path("namespaces")
-            .names(List.of("namespaces", "namespace", "ns"))
-            .build();
+        .kind("Namespace")
+        .namespaced(false)
+        .synchronizable(false)
+        .path("namespaces")
+        .names(List.of("namespaces", "namespace", "ns"))
+        .build();
 
     /**
-     * Role binding repository
+     * Role binding repository.
     */
     @Inject
     RoleBindingRepository roleBindingRepository;
 
     /**
-     * List API resources
+     * List API resources.
+     *
      * @param authentication The authentication
     * @return The list of API resources
     */
     @Get
     public List<ResourceDefinition> list(@Nullable Authentication authentication) {
         List<ResourceDefinition> all = List.of(
-                ACL,
-                CONNECTOR,
-                KSTREAM,
-                ROLE_BINDING,
-                RESOURCE_QUOTA,
-                CONNECT_CLUSTER,
-                TOPIC,
-                NAMESPACE,
-                SCHEMA
+            ACL,
+            CONNECTOR,
+            KSTREAM,
+            ROLE_BINDING,
+            RESOURCE_QUOTA,
+            CONNECT_CLUSTER,
+            TOPIC,
+            NAMESPACE,
+            SCHEMA
         );
 
         if (authentication == null) {
             return all; // Backward compatibility for cli <= 1.3.0
         }
 
-        List<String> roles = (List<String>)authentication.getAttributes().getOrDefault("roles", List.of());
-        List<String> groups = (List<String>) authentication.getAttributes().getOrDefault("groups",List.of());
+        List<String> roles = (List<String>) authentication.getAttributes().getOrDefault("roles", List.of());
+        List<String> groups = (List<String>) authentication.getAttributes().getOrDefault("groups", List.of());
 
         if (roles.contains(ResourceBasedSecurityRule.IS_ADMIN)) {
             return all;
@@ -161,15 +163,18 @@ public List<ResourceDefinition> list(@Nullable Authentication authentication) {
 
         Collection<RoleBinding> roleBindings = roleBindingRepository.findAllForGroups(groups);
         List<String> authorizedResources = roleBindings.stream()
-                .flatMap(roleBinding -> roleBinding.getSpec().getRole().getResourceTypes().stream())
-                .distinct()
-                .toList();
+            .flatMap(roleBinding -> roleBinding.getSpec().getRole().getResourceTypes().stream())
+            .distinct()
+            .toList();
 
         return all.stream()
-                .filter(resourceDefinition -> authorizedResources.contains(resourceDefinition.getPath()))
-                .toList();
+            .filter(resourceDefinition -> authorizedResources.contains(resourceDefinition.getPath()))
+            .toList();
     }
 
+    /**
+     * API resource definition.
+     */
     @Introspected
     @Builder
     @Getter
diff --git a/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java b/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java
index 336c5218..24c3864f 100644
--- a/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java
+++ b/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java
@@ -11,20 +11,28 @@
 import io.micronaut.http.HttpResponse;
 import io.micronaut.http.HttpStatus;
 import io.micronaut.http.MutableHttpResponse;
-import io.micronaut.http.annotation.*;
+import io.micronaut.http.annotation.Body;
+import io.micronaut.http.annotation.Controller;
+import io.micronaut.http.annotation.Delete;
+import io.micronaut.http.annotation.Get;
+import io.micronaut.http.annotation.Post;
+import io.micronaut.http.annotation.QueryValue;
+import io.micronaut.http.annotation.Status;
 import io.micronaut.scheduling.TaskExecutors;
 import io.micronaut.scheduling.annotation.ExecuteOn;
 import io.swagger.v3.oas.annotations.tags.Tag;
 import jakarta.inject.Inject;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
 import jakarta.validation.Valid;
 import java.time.Instant;
 import java.util.Date;
 import java.util.List;
 import java.util.Optional;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
 
+/**
+ * Controller to manage connectors.
+ */
 @Tag(name = "Connectors", description = "Manage the connectors.")
 @Controller(value = "/api/namespaces/{namespace}/connectors")
 @ExecuteOn(TaskExecutors.IO)
@@ -38,7 +46,8 @@ public class ConnectorController extends NamespacedResourceController {
     ResourceQuotaService resourceQuotaService;
 
     /**
-     * List connectors by namespace
+     * List connectors by namespace.
+     *
      * @param namespace The namespace
     * @return A list of connectors
     */
@@ -48,7 +57,8 @@ public List<Connector> list(String namespace) {
     }
 
     /**
-     * Get a connector by namespace and name
+     * Get a connector by namespace and name.
+     *
      * @param namespace The namespace
     * @param connector The name
     * @return A connector
     */
@@ -59,21 +69,23 @@ public Optional<Connector> getConnector(String namespace, String connector) {
     }
 
     /**
-     * Delete a connector
+     * Delete a connector.
+ * * @param namespace The current namespace * @param connector The current connector name to delete - * @param dryrun Run in dry mode or not + * @param dryrun Run in dry mode or not * @return A HTTP response */ @Status(HttpStatus.NO_CONTENT) @Delete("/{connector}{?dryrun}") - public Mono> deleteConnector(String namespace, String connector, @QueryValue(defaultValue = "false") boolean dryrun) { + public Mono> deleteConnector(String namespace, String connector, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); // Validate ownership if (!connectorService.isNamespaceOwnerOfConnect(ns, connector)) { return Mono.error(new ResourceValidationException(List.of(String.format(NAMESPACE_NOT_OWNER, connector)), - "Connector", connector)); + "Connector", connector)); } Optional optionalConnector = connectorService.findByName(ns, connector); @@ -87,31 +99,34 @@ public Mono> deleteConnector(String namespace, String connect Connector connectorToDelete = optionalConnector.get(); sendEventLog(connectorToDelete.getKind(), - connectorToDelete.getMetadata(), - ApplyStatus.deleted, - connectorToDelete.getSpec(), - null); + connectorToDelete.getMetadata(), + ApplyStatus.deleted, + connectorToDelete.getSpec(), + null); return connectorService - .delete(ns, optionalConnector.get()) - .map(httpResponse -> HttpResponse.noContent()); + .delete(ns, optionalConnector.get()) + .map(httpResponse -> HttpResponse.noContent()); } /** - * Create a connector + * Create a connector. + * * @param namespace The namespace - * @param connector The connector to create - * @param dryrun Does the creation is a dry run + * @param connector The connector to create + * @param dryrun Does the creation is a dry run * @return The created connector */ @Post("{?dryrun}") - public Mono> apply(String namespace, @Valid @Body Connector connector, @QueryValue(defaultValue = "false") boolean dryrun) { + public Mono> apply(String namespace, @Valid @Body Connector connector, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); // Validate ownership if (!connectorService.isNamespaceOwnerOfConnect(ns, connector.getMetadata().getName())) { - return Mono.error(new ResourceValidationException(List.of(String.format(NAMESPACE_NOT_OWNER, connector.getMetadata().getName())), - connector.getKind(), connector.getMetadata().getName())); + return Mono.error(new ResourceValidationException( + List.of(String.format(NAMESPACE_NOT_OWNER, connector.getMetadata().getName())), + connector.getKind(), connector.getMetadata().getName())); } // Set / Override name in spec.config.name, required for several Kafka Connect API calls @@ -126,67 +141,74 @@ public Mono> apply(String namespace, @Valid @Body Connec // Validate locally return connectorService.validateLocally(ns, connector) - .flatMap(validationErrors -> { - if (!validationErrors.isEmpty()) { - return Mono.error(new ResourceValidationException(validationErrors, connector.getKind(), connector.getMetadata().getName())); - } - - // Validate against connect rest API /validate - return connectorService.validateRemotely(ns, connector) - .flatMap(remoteValidationErrors -> { - if (!remoteValidationErrors.isEmpty()) { - return Mono.error(new ResourceValidationException(remoteValidationErrors, connector.getKind(), connector.getMetadata().getName())); - } - - // Augment with server side fields - connector.getMetadata().setCreationTimestamp(Date.from(Instant.now())); - connector.getMetadata().setCluster(ns.getMetadata().getCluster()); - 
connector.getMetadata().setNamespace(ns.getMetadata().getName()); - connector.setStatus(Connector.ConnectorStatus.builder() - .state(Connector.TaskState.UNASSIGNED) - .build()); - - Optional existingConnector = connectorService.findByName(ns, connector.getMetadata().getName()); - if (existingConnector.isPresent() && existingConnector.get().equals(connector)) { - return Mono.just(formatHttpResponse(existingConnector.get(), ApplyStatus.unchanged)); - } - - ApplyStatus status = existingConnector.isPresent() ? ApplyStatus.changed : ApplyStatus.created; - - // Only check quota on connector creation - if (status.equals(ApplyStatus.created)) { - List quotaErrors = resourceQuotaService.validateConnectorQuota(ns); - if (!quotaErrors.isEmpty()) { - return Mono.error(new ResourceValidationException(quotaErrors, connector.getKind(), connector.getMetadata().getName())); - } - } - - if (dryrun) { - return Mono.just(formatHttpResponse(connector, status)); - } - - sendEventLog(connector.getKind(), connector.getMetadata(), status, - existingConnector.map(Connector::getSpec).orElse(null), connector.getSpec()); - - return Mono.just(formatHttpResponse(connectorService.createOrUpdate(connector), status)); - }); - }); + .flatMap(validationErrors -> { + if (!validationErrors.isEmpty()) { + return Mono.error(new ResourceValidationException(validationErrors, connector.getKind(), + connector.getMetadata().getName())); + } + + // Validate against connect rest API /validate + return connectorService.validateRemotely(ns, connector) + .flatMap(remoteValidationErrors -> { + if (!remoteValidationErrors.isEmpty()) { + return Mono.error( + new ResourceValidationException(remoteValidationErrors, connector.getKind(), + connector.getMetadata().getName())); + } + + // Augment with server side fields + connector.getMetadata().setCreationTimestamp(Date.from(Instant.now())); + connector.getMetadata().setCluster(ns.getMetadata().getCluster()); + connector.getMetadata().setNamespace(ns.getMetadata().getName()); + connector.setStatus(Connector.ConnectorStatus.builder() + .state(Connector.TaskState.UNASSIGNED) + .build()); + + Optional existingConnector = + connectorService.findByName(ns, connector.getMetadata().getName()); + if (existingConnector.isPresent() && existingConnector.get().equals(connector)) { + return Mono.just(formatHttpResponse(existingConnector.get(), ApplyStatus.unchanged)); + } + + ApplyStatus status = existingConnector.isPresent() ? ApplyStatus.changed : ApplyStatus.created; + + // Only check quota on connector creation + if (status.equals(ApplyStatus.created)) { + List quotaErrors = resourceQuotaService.validateConnectorQuota(ns); + if (!quotaErrors.isEmpty()) { + return Mono.error(new ResourceValidationException(quotaErrors, connector.getKind(), + connector.getMetadata().getName())); + } + } + + if (dryrun) { + return Mono.just(formatHttpResponse(connector, status)); + } + + sendEventLog(connector.getKind(), connector.getMetadata(), status, + existingConnector.map(Connector::getSpec).orElse(null), connector.getSpec()); + + return Mono.just(formatHttpResponse(connectorService.createOrUpdate(connector), status)); + }); + }); } /** - * Change the state of a connector - * @param namespace The namespace - * @param connector The connector to update the state + * Change the state of a connector. 
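Re-indented, the apply() pipeline above is easier to follow. Stripped of the augmentation details, it reduces to the sketch below, a condensation of this hunk using its own service and helper names, not a drop-in replacement: local validation, then remote validation against the Connect REST API, then create-or-update.

    // Condensed sketch of the apply() flow in this hunk (augmentation, quota
    // check and event log elided into a comment).
    Mono<HttpResponse<Connector>> applyFlow(Namespace ns, Connector connector) {
        return connectorService.validateLocally(ns, connector)
            .flatMap(localErrors -> {
                if (!localErrors.isEmpty()) {
                    return Mono.error(new ResourceValidationException(localErrors,
                        connector.getKind(), connector.getMetadata().getName()));
                }
                return connectorService.validateRemotely(ns, connector);
            })
            .flatMap(remoteErrors -> {
                if (!remoteErrors.isEmpty()) {
                    return Mono.error(new ResourceValidationException(remoteErrors,
                        connector.getKind(), connector.getMetadata().getName()));
                }
                // augmentation, quota check, event log and persistence happen here
                return Mono.just(formatHttpResponse(
                    connectorService.createOrUpdate(connector), ApplyStatus.created));
            });
    }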
+ * + * @param namespace The namespace + * @param connector The connector to update the state * @param changeConnectorState The state to set * @return The change connector state response */ @Post("/{connector}/change-state") - public Mono> changeState(String namespace, String connector, @Body @Valid ChangeConnectorState changeConnectorState) { + public Mono> changeState( + String namespace, String connector, @Body @Valid ChangeConnectorState changeConnectorState) { Namespace ns = getNamespace(namespace); if (!connectorService.isNamespaceOwnerOfConnect(ns, connector)) { return Mono.error(new ResourceValidationException(List.of(String.format(NAMESPACE_NOT_OWNER, connector)), - "Connector", connector)); + "Connector", connector)); } Optional optionalConnector = connectorService.findByName(ns, connector); @@ -197,45 +219,42 @@ public Mono> changeState(String namesp Mono> response; switch (changeConnectorState.getSpec().getAction()) { - case restart: - response = connectorService.restart(ns, optionalConnector.get()); - break; - case pause: - response = connectorService.pause(ns, optionalConnector.get()); - break; - case resume: - response = connectorService.resume(ns, optionalConnector.get()); - break; - default: - return Mono.error(new IllegalStateException("Unspecified action " + changeConnectorState.getSpec().getAction())); + case restart -> response = connectorService.restart(ns, optionalConnector.get()); + case pause -> response = connectorService.pause(ns, optionalConnector.get()); + case resume -> response = connectorService.resume(ns, optionalConnector.get()); + default -> { + return Mono.error( + new IllegalStateException("Unspecified action " + changeConnectorState.getSpec().getAction())); + } } return response - .doOnSuccess(success -> { - changeConnectorState.setStatus(ChangeConnectorState.ChangeConnectorStateStatus.builder() - .success(true) - .code(success.status()) - .build()); - changeConnectorState.setMetadata(optionalConnector.get().getMetadata()); - changeConnectorState.getMetadata().setCreationTimestamp(Date.from(Instant.now())); - }) - .doOnError(error -> { - changeConnectorState.setStatus(ChangeConnectorState.ChangeConnectorStateStatus.builder() - .success(false) - .code(HttpStatus.INTERNAL_SERVER_ERROR) - .errorMessage(error.getMessage()) - .build()); - changeConnectorState.setMetadata(optionalConnector.get().getMetadata()); - changeConnectorState.getMetadata().setCreationTimestamp(Date.from(Instant.now())); - }) - .map(httpResponse -> HttpResponse.ok(changeConnectorState)) - .onErrorReturn(HttpResponse.ok(changeConnectorState)); + .doOnSuccess(success -> { + changeConnectorState.setStatus(ChangeConnectorState.ChangeConnectorStateStatus.builder() + .success(true) + .code(success.status()) + .build()); + changeConnectorState.setMetadata(optionalConnector.get().getMetadata()); + changeConnectorState.getMetadata().setCreationTimestamp(Date.from(Instant.now())); + }) + .doOnError(error -> { + changeConnectorState.setStatus(ChangeConnectorState.ChangeConnectorStateStatus.builder() + .success(false) + .code(HttpStatus.INTERNAL_SERVER_ERROR) + .errorMessage(error.getMessage()) + .build()); + changeConnectorState.setMetadata(optionalConnector.get().getMetadata()); + changeConnectorState.getMetadata().setCreationTimestamp(Date.from(Instant.now())); + }) + .map(httpResponse -> HttpResponse.ok(changeConnectorState)) + .onErrorReturn(HttpResponse.ok(changeConnectorState)); } /** - * Import unsynchronized connectors + * Import unsynchronized connectors. 
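The changeState() hunk above swaps the case/break statement form for an arrow switch. Arrow labels cannot fall through, so each case stands alone and no break is needed. A minimal, self-contained illustration of the same rewrite (enum and strings invented for the example):

    // Arrow switch: each case is a single expression, no fall-through possible.
    enum Action { RESTART, PAUSE, RESUME }

    static String describe(Action action) {
        return switch (action) {
            case RESTART -> "restarting";
            case PAUSE -> "pausing";
            case RESUME -> "resuming";
        };
    }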
+ * * @param namespace The namespace - * @param dryrun Is dry run mode or not ? + * @param dryrun Is dry run mode or not ? * @return The list of imported connectors */ @Post("/_/import{?dryrun}") @@ -251,7 +270,8 @@ public Flux importResources(String namespace, @QueryValue(defaultValu return unsynchronizedConnector; } - sendEventLog(unsynchronizedConnector.getKind(), unsynchronizedConnector.getMetadata(), ApplyStatus.created, null, unsynchronizedConnector.getSpec()); + sendEventLog(unsynchronizedConnector.getKind(), unsynchronizedConnector.getMetadata(), + ApplyStatus.created, null, unsynchronizedConnector.getSpec()); return connectorService.createOrUpdate(unsynchronizedConnector); }); diff --git a/src/main/java/com/michelin/ns4kafka/controllers/ConsumerGroupController.java b/src/main/java/com/michelin/ns4kafka/controllers/ConsumerGroupController.java index cda7ef24..3022090a 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/ConsumerGroupController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/ConsumerGroupController.java @@ -13,15 +13,17 @@ import io.micronaut.http.annotation.QueryValue; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; -import org.apache.kafka.common.TopicPartition; - import jakarta.validation.Valid; import java.time.Instant; import java.util.Date; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; +import org.apache.kafka.common.TopicPartition; +/** + * Controller to manage the consumer groups. + */ @Tag(name = "Consumer Groups", description = "Manage the consumer groups.") @Controller("/api/namespaces/{namespace}/consumer-groups") public class ConsumerGroupController extends NamespacedResourceController { @@ -29,18 +31,20 @@ public class ConsumerGroupController extends NamespacedResourceController { ConsumerGroupService consumerGroupService; /** - * Reset offsets by topic and consumer group - * @param namespace The namespace - * @param consumerGroup The consumer group + * Reset offsets by topic and consumer group. + * + * @param namespace The namespace + * @param consumerGroup The consumer group * @param consumerGroupResetOffsets The information about how to reset - * @param dryrun Is dry run mode or not ? + * @param dryrun Is dry run mode or not ? 
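A pattern worth naming before the following hunks repeat it: every mutating endpoint here validates fully and treats dryrun as "stop right before the side effects". A generic rendering of that guard, as a hypothetical helper that is not part of the PR:

    import io.micronaut.http.HttpResponse;
    import java.util.function.UnaryOperator;

    // Hypothetical distillation of the shared dry-run contract: validation
    // has already run; dryrun skips only the audit event and the write.
    static <T> HttpResponse<T> applyOrPreview(T resource, boolean dryrun, UnaryOperator<T> write) {
        if (dryrun) {
            return HttpResponse.ok(resource);          // preview: no event log, no persistence
        }
        return HttpResponse.ok(write.apply(resource)); // real apply
    }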
* @return The reset offsets response */ @Post("/{consumerGroup}/reset{?dryrun}") public List resetOffsets(String namespace, String consumerGroup, - @Valid @Body ConsumerGroupResetOffsets consumerGroupResetOffsets, - @QueryValue(defaultValue = "false") boolean dryrun) throws ExecutionException { - Namespace ns = getNamespace(namespace); + @Valid @Body + ConsumerGroupResetOffsets consumerGroupResetOffsets, + @QueryValue(defaultValue = "false") boolean dryrun) + throws ExecutionException { // Validate spec List validationErrors = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); @@ -54,7 +58,7 @@ public List resetOffsets(String namespace, St throw new ResourceValidationException(validationErrors, "ConsumerGroup", consumerGroup); } - // Augment + Namespace ns = getNamespace(namespace); consumerGroupResetOffsets.getMetadata().setCreationTimestamp(Date.from(Instant.now())); consumerGroupResetOffsets.getMetadata().setNamespace(ns.getMetadata().getName()); consumerGroupResetOffsets.getMetadata().setCluster(ns.getMetadata().getCluster()); @@ -66,35 +70,40 @@ public List resetOffsets(String namespace, St // Validate Consumer Group is dead or inactive String currentState = consumerGroupService.getConsumerGroupStatus(ns, consumerGroup); if (!List.of("Empty", "Dead").contains(currentState)) { - throw new IllegalStateException("Assignments can only be reset if the consumer group \"" + consumerGroup + "\" is inactive, but the current state is " + currentState.toLowerCase() + "."); + throw new IllegalStateException( + "Assignments can only be reset if the consumer group \"" + consumerGroup + + "\" is inactive, but the current state is " + currentState.toLowerCase() + "."); } // List partitions - List partitionsToReset = consumerGroupService.getPartitionsToReset(ns, consumerGroup, consumerGroupResetOffsets.getSpec().getTopic()); + List partitionsToReset = consumerGroupService.getPartitionsToReset(ns, consumerGroup, + consumerGroupResetOffsets.getSpec().getTopic()); // Prepare offsets - Map preparedOffsets = consumerGroupService.prepareOffsetsToReset(ns, consumerGroup, consumerGroupResetOffsets.getSpec().getOptions(), partitionsToReset, consumerGroupResetOffsets.getSpec().getMethod()); + Map preparedOffsets = consumerGroupService.prepareOffsetsToReset(ns, consumerGroup, + consumerGroupResetOffsets.getSpec().getOptions(), partitionsToReset, + consumerGroupResetOffsets.getSpec().getMethod()); if (!dryrun) { sendEventLog("ConsumerGroupResetOffsets", - consumerGroupResetOffsets.getMetadata(), - ApplyStatus.changed, - null, - consumerGroupResetOffsets.getSpec()); + consumerGroupResetOffsets.getMetadata(), + ApplyStatus.changed, + null, + consumerGroupResetOffsets.getSpec()); consumerGroupService.alterConsumerGroupOffsets(ns, consumerGroup, preparedOffsets); } topicPartitionOffsets = preparedOffsets.entrySet() - .stream() - .map(entry -> ConsumerGroupResetOffsetsResponse.builder() - .spec(ConsumerGroupResetOffsetsResponse.ConsumerGroupResetOffsetsResponseSpec.builder() - .topic(entry.getKey().topic()) - .partition(entry.getKey().partition()) - .offset(entry.getValue()) - .consumerGroup(consumerGroup) - .build()) - .build()) - .toList(); + .stream() + .map(entry -> ConsumerGroupResetOffsetsResponse.builder() + .spec(ConsumerGroupResetOffsetsResponse.ConsumerGroupResetOffsetsResponseSpec.builder() + .topic(entry.getKey().topic()) + .partition(entry.getKey().partition()) + .offset(entry.getValue()) + .consumerGroup(consumerGroup) + .build()) + .build()) + .toList(); } catch (InterruptedException 
e) { Thread.currentThread().interrupt(); } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/ExceptionHandlerController.java b/src/main/java/com/michelin/ns4kafka/controllers/ExceptionHandlerController.java index 1d19e53c..62857d28 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/ExceptionHandlerController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/ExceptionHandlerController.java @@ -12,135 +12,178 @@ import io.micronaut.http.annotation.Error; import io.micronaut.security.authentication.AuthenticationException; import io.micronaut.security.authentication.AuthorizationException; -import lombok.extern.slf4j.Slf4j; - import jakarta.validation.ConstraintViolation; import jakarta.validation.ConstraintViolationException; import jakarta.validation.ElementKind; import jakarta.validation.Path; import java.util.Iterator; import java.util.List; +import lombok.extern.slf4j.Slf4j; +/** + * Exception handler controller. + */ @Slf4j @Controller("/errors") public class ExceptionHandlerController { + /** + * Handle resource validation exception. + * + * @param request the request + * @param exception the exception + * @return the http response + */ @Error(global = true) public HttpResponse error(HttpRequest request, ResourceValidationException exception) { var status = Status.builder() - .status(StatusPhase.Failed) - .message(String.format("Invalid %s %s", exception.getKind(), exception.getName())) - .reason(StatusReason.Invalid) - .details(StatusDetails.builder() - .kind(exception.getKind()) - .name(exception.getName()) - .causes(exception.getValidationErrors()) - .build()) - .code(HttpStatus.UNPROCESSABLE_ENTITY.getCode()) - .build(); + .status(StatusPhase.Failed) + .message(String.format("Invalid %s %s", exception.getKind(), exception.getName())) + .reason(StatusReason.Invalid) + .details(StatusDetails.builder() + .kind(exception.getKind()) + .name(exception.getName()) + .causes(exception.getValidationErrors()) + .build()) + .code(HttpStatus.UNPROCESSABLE_ENTITY.getCode()) + .build(); return HttpResponse.unprocessableEntity() - .body(status); + .body(status); } + /** + * Handle constraint violation exception. 
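Micronaut dispatches an uncaught exception to the most specific @Error(global = true) overload whose parameter type matches, falling back to the Exception handler at the bottom of this file. Extending the table is just another overload; for instance (hypothetical, not in this PR):

    // Hypothetical extra handler: dispatch is by declared parameter type,
    // reusing the same Status body shape as the handlers in this file.
    @Error(global = true)
    public HttpResponse<Status> error(HttpRequest<?> request, java.util.NoSuchElementException exception) {
        var status = Status.builder()
            .status(StatusPhase.Failed)
            .message("Not Found")
            .reason(StatusReason.NotFound)
            .code(HttpStatus.NOT_FOUND.getCode())
            .build();
        return HttpResponse.status(HttpStatus.NOT_FOUND).body(status);
    }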
+ * + * @param request the request + * @param exception the exception + * @return the http response + */ @Error(global = true) public HttpResponse error(HttpRequest request, ConstraintViolationException exception) { var status = Status.builder() - .status(StatusPhase.Failed) - .message("Invalid Resource") - .reason(StatusReason.Invalid) - .details(StatusDetails.builder() - .causes(exception.getConstraintViolations().stream().map(this::formatViolation).toList()) - .build()) - .code(HttpStatus.UNPROCESSABLE_ENTITY.getCode()) - .build(); + .status(StatusPhase.Failed) + .message("Invalid Resource") + .reason(StatusReason.Invalid) + .details(StatusDetails.builder() + .causes(exception.getConstraintViolations().stream().map(this::formatViolation).toList()) + .build()) + .code(HttpStatus.UNPROCESSABLE_ENTITY.getCode()) + .build(); return HttpResponse.unprocessableEntity() - .body(status); - } - - private String formatViolation(ConstraintViolation violation) { - Path propertyPath = violation.getPropertyPath(); - StringBuilder message = new StringBuilder(); - Iterator i = propertyPath.iterator(); - while (i.hasNext()) { - Path.Node node = i.next(); - if (node.getKind() == ElementKind.METHOD || node.getKind() == ElementKind.CONSTRUCTOR) { - continue; - } - message.append(node.getName()); - if (i.hasNext()) { - message.append('.'); - } - } - message.append(": ").append(violation.getMessage()); - return message.toString(); + .body(status); } + /** + * Handle not found exception. + * + * @param request the request + * @return the http response + */ @Error(global = true, status = HttpStatus.NOT_FOUND) public HttpResponse error(HttpRequest request) { var status = Status.builder() - .status(StatusPhase.Failed) - .message("Not Found") - .reason(StatusReason.NotFound) - .code(HttpStatus.NOT_FOUND.getCode()) - .build(); + .status(StatusPhase.Failed) + .message("Not Found") + .reason(StatusReason.NotFound) + .code(HttpStatus.NOT_FOUND.getCode()) + .build(); return HttpResponse.status(HttpStatus.NOT_FOUND) - .body(status); + .body(status); } + /** + * Handle authentication exception. + * + * @param request the request + * @param exception the exception + * @return the http response + */ @Error(global = true) public HttpResponse error(HttpRequest request, AuthenticationException exception) { var status = Status.builder() - .status(StatusPhase.Failed) - .message(exception.getMessage()) - .reason(StatusReason.Unauthorized) - .code(HttpStatus.UNAUTHORIZED.getCode()) - .build(); + .status(StatusPhase.Failed) + .message(exception.getMessage()) + .reason(StatusReason.Unauthorized) + .code(HttpStatus.UNAUTHORIZED.getCode()) + .build(); return HttpResponse.unauthorized().body(status); } + /** + * Handle authorization exception. 
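formatViolation() walks the violation's property path, drops the METHOD and CONSTRUCTOR nodes (the endpoint method frame), and joins the rest with dots. A standalone sketch of the path format it receives, assuming a Bean Validation provider such as Hibernate Validator on the classpath:

    import jakarta.validation.Valid;
    import jakarta.validation.Validation;
    import jakarta.validation.constraints.NotNull;

    // For controller-method validation the raw path starts with a METHOD node
    // (e.g. "apply.namespace.spec.topic"); the handler trims it and reports
    // "namespace.spec.topic: must not be null".
    class Spec { @NotNull String topic; }
    class Payload { @Valid Spec spec = new Spec(); }

    void demo() {
        var validator = Validation.buildDefaultValidatorFactory().getValidator();
        validator.validate(new Payload())
            .forEach(v -> System.out.println(v.getPropertyPath() + ": " + v.getMessage()));
        // prints: spec.topic: must not be null
    }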
+ * + * @param request the request + * @param exception the exception + * @return the http response + */ @Error(global = true) public HttpResponse error(HttpRequest request, AuthorizationException exception) { if (exception.isForbidden()) { var status = Status.builder() - .status(StatusPhase.Failed) - .message("Resource forbidden") - .reason(StatusReason.Forbidden) - .code(HttpStatus.FORBIDDEN.getCode()) - .build(); + .status(StatusPhase.Failed) + .message("Resource forbidden") + .reason(StatusReason.Forbidden) + .code(HttpStatus.FORBIDDEN.getCode()) + .build(); return HttpResponse.status(HttpStatus.FORBIDDEN) - .body(status); + .body(status); } var status = Status.builder() - .status(StatusPhase.Failed) - .message(exception.getMessage()) - .reason(StatusReason.Unauthorized) - .code(HttpStatus.UNAUTHORIZED.getCode()) - .build(); + .status(StatusPhase.Failed) + .message(exception.getMessage()) + .reason(StatusReason.Unauthorized) + .code(HttpStatus.UNAUTHORIZED.getCode()) + .build(); return HttpResponse.unauthorized().body(status); } + /** + * Handle exception. + * + * @param request the request + * @param exception the exception + * @return the http response + */ @Error(global = true) public HttpResponse error(HttpRequest request, Exception exception) { log.error("An error occurred on API endpoint {} {}: {}", request.getMethodName(), - request.getUri(), exception.getMessage(), exception); + request.getUri(), exception.getMessage(), exception); Status status = Status.builder() - .status(StatusPhase.Failed) - .message("Internal server error") - .reason(StatusReason.InternalError) - .details(StatusDetails.builder() - .causes(List.of(exception.getMessage() != null ? exception.getMessage() : exception.toString())) - .build()) - .code(HttpStatus.INTERNAL_SERVER_ERROR.getCode()) - .build(); + .status(StatusPhase.Failed) + .message("Internal server error") + .reason(StatusReason.InternalError) + .details(StatusDetails.builder() + .causes(List.of(exception.getMessage() != null ? 
exception.getMessage() : exception.toString())) + .build()) + .code(HttpStatus.INTERNAL_SERVER_ERROR.getCode()) + .build(); return HttpResponse - .status(HttpStatus.INTERNAL_SERVER_ERROR) - .body(status); + .status(HttpStatus.INTERNAL_SERVER_ERROR) + .body(status); + } + + private String formatViolation(ConstraintViolation violation) { + Path propertyPath = violation.getPropertyPath(); + StringBuilder message = new StringBuilder(); + Iterator i = propertyPath.iterator(); + while (i.hasNext()) { + Path.Node node = i.next(); + if (node.getKind() == ElementKind.METHOD || node.getKind() == ElementKind.CONSTRUCTOR) { + continue; + } + message.append(node.getName()); + if (i.hasNext()) { + message.append('.'); + } + } + message.append(": ").append(violation.getMessage()); + return message.toString(); } } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/NamespaceController.java b/src/main/java/com/michelin/ns4kafka/controllers/NamespaceController.java index 4df03541..d075a684 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/NamespaceController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/NamespaceController.java @@ -7,11 +7,15 @@ import com.michelin.ns4kafka.utils.enums.ApplyStatus; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; -import io.micronaut.http.annotation.*; +import io.micronaut.http.annotation.Body; +import io.micronaut.http.annotation.Controller; +import io.micronaut.http.annotation.Delete; +import io.micronaut.http.annotation.Get; +import io.micronaut.http.annotation.Post; +import io.micronaut.http.annotation.QueryValue; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.inject.Inject; - import jakarta.annotation.security.RolesAllowed; +import jakarta.inject.Inject; import jakarta.validation.Valid; import java.time.Instant; import java.util.ArrayList; @@ -19,6 +23,9 @@ import java.util.List; import java.util.Optional; +/** + * Controller to manage the namespaces. + */ @RolesAllowed(ResourceBasedSecurityRule.IS_ADMIN) @Tag(name = "Namespaces", description = "Manage the namespaces.") @Controller("/api/namespaces") @@ -27,7 +34,8 @@ public class NamespaceController extends NonNamespacedResourceController { NamespaceService namespaceService; /** - * List namespaces + * List namespaces. + * * @return A list of namespaces */ @Get("/") @@ -36,7 +44,8 @@ public List list() { } /** - * Get a namespace by name + * Get a namespace by name. + * * @param namespace The namespace * @return A namespace */ @@ -46,13 +55,15 @@ public Optional get(String namespace) { } /** - * Create a namespace + * Create a namespace. 
+ * * @param namespace The namespace - * @param dryrun Does the creation is a dry run + * @param dryrun Does the creation is a dry run * @return The created namespace */ @Post("{?dryrun}") - public HttpResponse apply(@Valid @Body Namespace namespace, @QueryValue(defaultValue = "false") boolean dryrun) { + public HttpResponse apply(@Valid @Body Namespace namespace, + @QueryValue(defaultValue = "false") boolean dryrun) { Optional existingNamespace = namespaceService.findByName(namespace.getMetadata().getName()); List validationErrors = new ArrayList<>(); @@ -61,20 +72,21 @@ public HttpResponse apply(@Valid @Body Namespace namespace, @QueryVal } else { if (!namespace.getMetadata().getCluster().equals(existingNamespace.get().getMetadata().getCluster())) { validationErrors.add("Invalid value " + namespace.getMetadata().getCluster() - + " for cluster: Value is immutable (" - + existingNamespace.get().getMetadata().getCluster() + ")"); + + " for cluster: Value is immutable (" + + existingNamespace.get().getMetadata().getCluster() + ")"); } if (!namespace.getMetadata().getCluster().equals(existingNamespace.get().getMetadata().getCluster())) { validationErrors.add("Invalid value " + namespace.getSpec().getKafkaUser() - + " for kafkaUser: Value is immutable (" - + existingNamespace.get().getSpec().getKafkaUser() + ")"); + + " for kafkaUser: Value is immutable (" + + existingNamespace.get().getSpec().getKafkaUser() + ")"); } } validationErrors.addAll(namespaceService.validate(namespace)); if (!validationErrors.isEmpty()) { - throw new ResourceValidationException(validationErrors, namespace.getKind(), namespace.getMetadata().getName()); + throw new ResourceValidationException(validationErrors, namespace.getKind(), + namespace.getMetadata().getName()); } namespace.getMetadata().setNamespace(namespace.getMetadata().getName()); @@ -91,18 +103,19 @@ public HttpResponse apply(@Valid @Body Namespace namespace, @QueryVal } sendEventLog(namespace.getKind(), - namespace.getMetadata(), - status, - existingNamespace.map(Namespace::getSpec).orElse(null), - namespace.getSpec()); + namespace.getMetadata(), + status, + existingNamespace.map(Namespace::getSpec).orElse(null), + namespace.getSpec()); return formatHttpResponse(namespaceService.createOrUpdate(namespace), status); } /** - * Delete a namespace + * Delete a namespace. + * * @param namespace The namespace - * @param dryrun Is dry run mode or not ? + * @param dryrun Is dry run mode or not ? 
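Worth noting in the hunk above: both immutability guards test getCluster() equality, while the second one reports a kafkaUser error, so a changed kafkaUser with an unchanged cluster slips through unvalidated. The reformat preserves the duplicated condition as-is; if it is unintentional, the second guard would presumably compare the users:

    // Presumed intent of the second immutability guard (compare kafkaUser, not cluster):
    if (!namespace.getSpec().getKafkaUser().equals(existingNamespace.get().getSpec().getKafkaUser())) {
        validationErrors.add("Invalid value " + namespace.getSpec().getKafkaUser()
            + " for kafkaUser: Value is immutable ("
            + existingNamespace.get().getSpec().getKafkaUser() + ")");
    }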
* @return An HTTP response */ @Delete("/{namespace}{?dryrun}") @@ -115,8 +128,8 @@ public HttpResponse delete(String namespace, @QueryValue(defaultValue = "f List namespaceResources = namespaceService.listAllNamespaceResources(optionalNamespace.get()); if (!namespaceResources.isEmpty()) { List validationErrors = namespaceResources.stream() - .map(s -> "Namespace resource must be deleted first :" + s) - .toList(); + .map(s -> "Namespace resource must be deleted first: " + s) + .toList(); throw new ResourceValidationException(validationErrors, "Namespace", namespace); } @@ -126,10 +139,10 @@ public HttpResponse delete(String namespace, @QueryValue(defaultValue = "f var namespaceToDelete = optionalNamespace.get(); sendEventLog(namespaceToDelete.getKind(), - namespaceToDelete.getMetadata(), - ApplyStatus.deleted, - namespaceToDelete.getSpec(), - null); + namespaceToDelete.getMetadata(), + ApplyStatus.deleted, + namespaceToDelete.getSpec(), + null); namespaceService.delete(optionalNamespace.get()); return HttpResponse.noContent(); } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/RoleBindingController.java b/src/main/java/com/michelin/ns4kafka/controllers/RoleBindingController.java index c1494d8f..6eaf1a28 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/RoleBindingController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/RoleBindingController.java @@ -5,21 +5,28 @@ import com.michelin.ns4kafka.models.RoleBinding; import com.michelin.ns4kafka.services.RoleBindingService; import com.michelin.ns4kafka.utils.enums.ApplyStatus; -import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; -import io.micronaut.http.annotation.*; +import io.micronaut.http.annotation.Body; +import io.micronaut.http.annotation.Controller; +import io.micronaut.http.annotation.Delete; +import io.micronaut.http.annotation.Get; +import io.micronaut.http.annotation.Post; +import io.micronaut.http.annotation.QueryValue; +import io.micronaut.http.annotation.Status; import io.micronaut.scheduling.TaskExecutors; import io.micronaut.scheduling.annotation.ExecuteOn; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; - import jakarta.validation.Valid; import java.time.Instant; import java.util.Date; import java.util.List; import java.util.Optional; +/** + * Controller to manage role bindings. + */ @Tag(name = "Role Bindings", description = "Manage the role bindings.") @Controller(value = "/api/namespaces/{namespace}/role-bindings") @ExecuteOn(TaskExecutors.IO) @@ -28,7 +35,8 @@ public class RoleBindingController extends NamespacedResourceController { RoleBindingService roleBindingService; /** - * List role bindings by namespace + * List role bindings by namespace. + * * @param namespace The namespace * @return A list of role bindings */ @@ -38,9 +46,10 @@ public List list(String namespace) { } /** - * Get a role binding by namespace and name + * Get a role binding by namespace and name. + * * @param namespace The namespace - * @param name The role binding name + * @param name The role binding name * @return A role binding */ @Get("/{name}") @@ -49,21 +58,24 @@ public Optional get(String namespace, String name) { } /** - * Create a role binding - * @param namespace The namespace + * Create a role binding. 
+ * + * @param namespace The namespace * @param roleBinding The role binding - * @param dryrun Does the creation is a dry run + * @param dryrun Does the creation is a dry run * @return The created role binding */ @Post("{?dryrun}") - public HttpResponse apply(String namespace, @Valid @Body RoleBinding roleBinding, @QueryValue(defaultValue = "false") boolean dryrun) { + public HttpResponse apply(String namespace, @Valid @Body RoleBinding roleBinding, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); roleBinding.getMetadata().setCreationTimestamp(Date.from(Instant.now())); roleBinding.getMetadata().setCluster(ns.getMetadata().getCluster()); roleBinding.getMetadata().setNamespace(namespace); - Optional existingRoleBinding = roleBindingService.findByName(namespace, roleBinding.getMetadata().getName()); + Optional existingRoleBinding = + roleBindingService.findByName(namespace, roleBinding.getMetadata().getName()); if (existingRoleBinding.isPresent() && existingRoleBinding.get().equals(roleBinding)) { return formatHttpResponse(existingRoleBinding.get(), ApplyStatus.unchanged); } @@ -74,32 +86,29 @@ public HttpResponse apply(String namespace, @Valid @Body RoleBindin } sendEventLog(roleBinding.getKind(), - roleBinding.getMetadata(), - status, - existingRoleBinding.map(RoleBinding::getSpec).orElse(null), - roleBinding.getSpec()); + roleBinding.getMetadata(), + status, + existingRoleBinding.map(RoleBinding::getSpec).orElse(null), + roleBinding.getSpec()); roleBindingService.create(roleBinding); return formatHttpResponse(roleBinding, status); } /** - * Delete a role binding + * Delete a role binding. + * * @param namespace The namespace - * @param name The role binding - * @param dryrun Is dry run mode or not ? + * @param name The role binding + * @param dryrun Is dry run mode or not ? 
* @return An HTTP response */ @Delete("/{name}{?dryrun}") @Status(HttpStatus.NO_CONTENT) - public HttpResponse delete(String namespace, String name, @QueryValue(defaultValue = "false") boolean dryrun) { + public HttpResponse delete(String namespace, String name, + @QueryValue(defaultValue = "false") boolean dryrun) { Optional roleBinding = roleBindingService.findByName(namespace, name); - if (roleBinding.isEmpty()) { - throw new ResourceValidationException( - List.of("Invalid value " + name + " for name : Role Binding doesn't exist in this namespace"), - "RoleBinding", - name - ); + return HttpResponse.notFound(); } if (dryrun) { @@ -107,11 +116,11 @@ public HttpResponse delete(String namespace, String name, @QueryValue(defa } var roleBindingToDelete = roleBinding.get(); - sendEventLog(roleBindingToDelete .getKind(), - roleBindingToDelete.getMetadata(), - ApplyStatus.deleted, - roleBindingToDelete.getSpec(), - null); + sendEventLog(roleBindingToDelete.getKind(), + roleBindingToDelete.getMetadata(), + ApplyStatus.deleted, + roleBindingToDelete.getSpec(), + null); roleBindingService.delete(roleBindingToDelete); return HttpResponse.noContent(); } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/SchemaController.java b/src/main/java/com/michelin/ns4kafka/controllers/SchemaController.java index 50f409ea..437dcc99 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/SchemaController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/SchemaController.java @@ -11,20 +11,29 @@ import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; -import io.micronaut.http.annotation.*; +import io.micronaut.http.annotation.Body; +import io.micronaut.http.annotation.Controller; +import io.micronaut.http.annotation.Delete; +import io.micronaut.http.annotation.Get; +import io.micronaut.http.annotation.PathVariable; +import io.micronaut.http.annotation.Post; +import io.micronaut.http.annotation.QueryValue; +import io.micronaut.http.annotation.Status; import io.micronaut.scheduling.TaskExecutors; import io.micronaut.scheduling.annotation.ExecuteOn; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - import jakarta.validation.Valid; import java.time.Instant; import java.util.Date; import java.util.List; import java.util.Optional; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +/** + * Controller to manage schemas. + */ @Tag(name = "Schemas", description = "Manage the schemas.") @Controller(value = "/api/namespaces/{namespace}/schemas") @ExecuteOn(TaskExecutors.IO) @@ -33,7 +42,8 @@ public class SchemaController extends NamespacedResourceController { SchemaService schemaService; /** - * List schemas by namespace + * List schemas by namespace. + * * @param namespace The namespace * @return A list of schemas */ @@ -43,7 +53,8 @@ public Flux list(String namespace) { } /** - * Get the last version of a schema by namespace and subject + * Get the last version of a schema by namespace and subject. + * * @param namespace The namespace * @param subject The subject * @return A schema @@ -60,76 +71,86 @@ public Mono get(String namespace, String subject) { } /** - * Publish a schema + * Publish a schema. 
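As rendered here, the role binding delete hunk reads as if the isEmpty() guard were removed and notFound() returned unconditionally; the surrounding braces suggest the guard line is unchanged context and only the thrown ResourceValidationException was swapped for a 404. The presumed shape of the method after the change:

    // Missing role bindings now yield 404 instead of a validation error.
    Optional<RoleBinding> roleBinding = roleBindingService.findByName(namespace, name);
    if (roleBinding.isEmpty()) {
        return HttpResponse.notFound();
    }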
+ * * @param namespace The namespace * @param schema The schema to create * @param dryrun Does the creation is a dry run * @return The created schema */ @Post - public Mono> apply(String namespace, @Valid @Body Schema schema, @QueryValue(defaultValue = "false") boolean dryrun) { + public Mono> apply(String namespace, @Valid @Body Schema schema, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); // Validate TopicNameStrategy // https://github.com/confluentinc/schema-registry/blob/master/schema-serializer/src/main/java/io/confluent/kafka/serializers/subject/TopicNameStrategy.java if (!schema.getMetadata().getName().endsWith("-key") && !schema.getMetadata().getName().endsWith("-value")) { - return Mono.error(new ResourceValidationException(List.of("Invalid value " + schema.getMetadata().getName() + - " for name: subject must end with -key or -value"), schema.getKind(), schema.getMetadata().getName())); + return Mono.error( + new ResourceValidationException(List.of("Invalid value " + schema.getMetadata().getName() + + " for name: subject must end with -key or -value"), schema.getKind(), + schema.getMetadata().getName())); } // Validate ownership if (!schemaService.isNamespaceOwnerOfSubject(ns, schema.getMetadata().getName())) { - return Mono.error(new ResourceValidationException(List.of(String.format("Namespace not owner of this schema %s.", + return Mono.error( + new ResourceValidationException(List.of(String.format("Namespace not owner of this schema %s.", schema.getMetadata().getName())), schema.getKind(), schema.getMetadata().getName())); } return schemaService - .validateSchemaCompatibility(ns.getMetadata().getCluster(), schema) - .flatMap(validationErrors -> { - if (!validationErrors.isEmpty()) { - return Mono.error(new ResourceValidationException(validationErrors, schema.getKind(), schema.getMetadata().getName())); - } - - return schemaService - .getLatestSubject(ns, schema.getMetadata().getName()) - .map(Optional::of) - .defaultIfEmpty(Optional.empty()) - .flatMap(latestSubjectOptional -> { - schema.getMetadata().setCreationTimestamp(Date.from(Instant.now())); - schema.getMetadata().setCluster(ns.getMetadata().getCluster()); - schema.getMetadata().setNamespace(ns.getMetadata().getName()); - latestSubjectOptional.ifPresent(value -> schema.getSpec().setCompatibility(value.getSpec().getCompatibility())); - - if (dryrun) { - // Cannot compute the "unchanged" apply status before getting the ID at registration - return Mono.just(formatHttpResponse(schema, - latestSubjectOptional.isPresent() ? 
ApplyStatus.changed : ApplyStatus.created)); + .validateSchemaCompatibility(ns.getMetadata().getCluster(), schema) + .flatMap(validationErrors -> { + if (!validationErrors.isEmpty()) { + return Mono.error(new ResourceValidationException(validationErrors, schema.getKind(), + schema.getMetadata().getName())); + } + + return schemaService + .getLatestSubject(ns, schema.getMetadata().getName()) + .map(Optional::of) + .defaultIfEmpty(Optional.empty()) + .flatMap(latestSubjectOptional -> { + schema.getMetadata().setCreationTimestamp(Date.from(Instant.now())); + schema.getMetadata().setCluster(ns.getMetadata().getCluster()); + schema.getMetadata().setNamespace(ns.getMetadata().getName()); + latestSubjectOptional.ifPresent( + value -> schema.getSpec().setCompatibility(value.getSpec().getCompatibility())); + + if (dryrun) { + // Cannot compute the "unchanged" apply status before getting the ID at registration + return Mono.just(formatHttpResponse(schema, + latestSubjectOptional.isPresent() ? ApplyStatus.changed : ApplyStatus.created)); + } + + return schemaService + .register(ns, schema) + .map(id -> { + ApplyStatus status; + + if (latestSubjectOptional.isEmpty()) { + status = ApplyStatus.created; + sendEventLog(schema.getKind(), schema.getMetadata(), status, null, + schema.getSpec()); + } else if (id > latestSubjectOptional.get().getSpec().getId()) { + status = ApplyStatus.changed; + sendEventLog(schema.getKind(), schema.getMetadata(), status, + latestSubjectOptional.get().getSpec(), + schema.getSpec()); + } else { + status = ApplyStatus.unchanged; } - return schemaService - .register(ns, schema) - .map(id -> { - ApplyStatus status; - - if (latestSubjectOptional.isEmpty()) { - status = ApplyStatus.created; - sendEventLog(schema.getKind(), schema.getMetadata(), status, null, schema.getSpec()); - } else if (id > latestSubjectOptional.get().getSpec().getId()) { - status = ApplyStatus.changed; - sendEventLog(schema.getKind(), schema.getMetadata(), status, latestSubjectOptional.get().getSpec(), - schema.getSpec()); - } else { - status = ApplyStatus.unchanged; - } - - return formatHttpResponse(schema, status); - }); + return formatHttpResponse(schema, status); }); - }); + }); + }); } /** - * Delete all schemas under the given subject + * Delete all schemas under the given subject. 
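As the comment in the hunk says, the apply status of a schema can only be settled after registration: the registry returns the existing version's ID when the posted schema is identical to the latest one, and a higher ID when a new version was created. The decision implied by the register() callback, reusing the project's ApplyStatus enum:

    import java.util.Optional;

    // no previous subject           -> created
    // returned id > latest known id -> changed (new version registered)
    // otherwise                     -> unchanged (registry deduplicated it)
    static ApplyStatus statusFor(int newId, Optional<Integer> latestId) {
        if (latestId.isEmpty()) {
            return ApplyStatus.created;
        }
        return newId > latestId.get() ? ApplyStatus.changed : ApplyStatus.unchanged;
    }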
+ * * @param namespace The current namespace * @param subject The current subject to delete * @param dryrun Run in dry mode or not @@ -138,86 +159,90 @@ public Mono> apply(String namespace, @Valid @Body Schema sc @Status(HttpStatus.NO_CONTENT) @Delete("/{subject}") public Mono> deleteSubject(String namespace, @PathVariable String subject, - @QueryValue(defaultValue = "false") boolean dryrun) { + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); // Validate ownership if (!schemaService.isNamespaceOwnerOfSubject(ns, subject)) { - return Mono.error(new ResourceValidationException(List.of(String.format("Namespace not owner of this schema %s.", subject)), - AccessControlEntry.ResourceType.SCHEMA.toString(), subject)); + return Mono.error(new ResourceValidationException( + List.of(String.format("Namespace not owner of this schema %s.", subject)), + AccessControlEntry.ResourceType.SCHEMA.toString(), subject)); } - return schemaService.getLatestSubject(ns, subject) - .map(Optional::of) - .defaultIfEmpty(Optional.empty()) - .flatMap(latestSubjectOptional -> { - if (latestSubjectOptional.isEmpty()) { - return Mono.just(HttpResponse.notFound()); - } - - if (dryrun) { - return Mono.just(HttpResponse.noContent()); - } - - Schema schemaToDelete = latestSubjectOptional.get(); - sendEventLog(schemaToDelete.getKind(), - schemaToDelete.getMetadata(), - ApplyStatus.deleted, - schemaToDelete.getSpec(), - null); - - return schemaService - .deleteSubject(ns, subject) - .map(deletedSchemaIds -> HttpResponse.noContent()); - }); + return schemaService.getLatestSubject(ns, subject) + .map(Optional::of) + .defaultIfEmpty(Optional.empty()) + .flatMap(latestSubjectOptional -> { + if (latestSubjectOptional.isEmpty()) { + return Mono.just(HttpResponse.notFound()); + } + + if (dryrun) { + return Mono.just(HttpResponse.noContent()); + } + + Schema schemaToDelete = latestSubjectOptional.get(); + sendEventLog(schemaToDelete.getKind(), + schemaToDelete.getMetadata(), + ApplyStatus.deleted, + schemaToDelete.getSpec(), + null); + + return schemaService + .deleteSubject(ns, subject) + .map(deletedSchemaIds -> HttpResponse.noContent()); + }); } /** - * Update the compatibility of a subject + * Update the compatibility of a subject. 
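The .map(Optional::of).defaultIfEmpty(Optional.empty()) pair that both schema endpoints use is how an empty Mono is turned into a present-versus-absent branch inside a single flatMap: an empty upstream would otherwise skip the flatMap entirely, so the 404 case could never be produced there. A minimal standalone rendering of the idiom:

    import java.util.Optional;
    import reactor.core.publisher.Mono;

    // Turn "a Mono that may complete empty" into "a Mono<Optional<T>> that
    // always emits", so absence can be handled inside the same flatMap.
    static <T> Mono<Optional<T>> presence(Mono<T> source) {
        return source.map(Optional::of).defaultIfEmpty(Optional.empty());
    }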
+ * * @param namespace The namespace * @param subject The subject to update * @param compatibility The compatibility to apply * @return A schema compatibility state */ @Post("/{subject}/config") - public Mono> config(String namespace, @PathVariable String subject, Schema.Compatibility compatibility) { + public Mono> config(String namespace, @PathVariable String subject, + Schema.Compatibility compatibility) { Namespace ns = getNamespace(namespace); if (!schemaService.isNamespaceOwnerOfSubject(ns, subject)) { - return Mono.error(new ResourceValidationException(List.of("Invalid prefix " + subject + - " : namespace not owner of this subject"), AccessControlEntry.ResourceType.SCHEMA.toString(), subject)); + return Mono.error(new ResourceValidationException(List.of("Invalid prefix " + subject + + " : namespace not owner of this subject"), + AccessControlEntry.ResourceType.SCHEMA.toString(), subject)); } return schemaService.getLatestSubject(ns, subject) - .map(Optional::of) - .defaultIfEmpty(Optional.empty()) - .flatMap(latestSubjectOptional -> { - if (latestSubjectOptional.isEmpty()) { - return Mono.just(HttpResponse.notFound()); - } - - SchemaCompatibilityState state = SchemaCompatibilityState.builder() - .metadata(latestSubjectOptional.get().getMetadata()) - .spec(SchemaCompatibilityState.SchemaCompatibilityStateSpec.builder() - .compatibility(compatibility) - .build()) - .build(); - - if (latestSubjectOptional.get().getSpec().getCompatibility().equals(compatibility)) { - return Mono.just(HttpResponse.ok(state)); - } - - return schemaService - .updateSubjectCompatibility(ns, latestSubjectOptional.get(), compatibility) - .map(schemaCompatibility -> { - sendEventLog("SchemaCompatibilityState", - latestSubjectOptional.get().getMetadata(), - ApplyStatus.changed, - latestSubjectOptional.get().getSpec().getCompatibility(), - compatibility); - - return HttpResponse.ok(state); - }); - }); + .map(Optional::of) + .defaultIfEmpty(Optional.empty()) + .flatMap(latestSubjectOptional -> { + if (latestSubjectOptional.isEmpty()) { + return Mono.just(HttpResponse.notFound()); + } + + SchemaCompatibilityState state = SchemaCompatibilityState.builder() + .metadata(latestSubjectOptional.get().getMetadata()) + .spec(SchemaCompatibilityState.SchemaCompatibilityStateSpec.builder() + .compatibility(compatibility) + .build()) + .build(); + + if (latestSubjectOptional.get().getSpec().getCompatibility().equals(compatibility)) { + return Mono.just(HttpResponse.ok(state)); + } + + return schemaService + .updateSubjectCompatibility(ns, latestSubjectOptional.get(), compatibility) + .map(schemaCompatibility -> { + sendEventLog("SchemaCompatibilityState", + latestSubjectOptional.get().getMetadata(), + ApplyStatus.changed, + latestSubjectOptional.get().getSpec().getCompatibility(), + compatibility); + + return HttpResponse.ok(state); + }); + }); } } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/StreamController.java b/src/main/java/com/michelin/ns4kafka/controllers/StreamController.java index 28ffc6c5..284cb0b8 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/StreamController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/StreamController.java @@ -8,16 +8,24 @@ import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; -import io.micronaut.http.annotation.*; +import io.micronaut.http.annotation.Body; +import io.micronaut.http.annotation.Controller; +import io.micronaut.http.annotation.Delete; 
+import io.micronaut.http.annotation.Get; +import io.micronaut.http.annotation.Post; +import io.micronaut.http.annotation.QueryValue; +import io.micronaut.http.annotation.Status; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; - import jakarta.validation.Valid; import java.time.Instant; import java.util.Date; import java.util.List; import java.util.Optional; +/** + * Controller to manage Kafka Streams. + */ @Tag(name = "Kafka Streams", description = "Manage the Kafka Streams.") @Controller(value = "/api/namespaces/{namespace}/streams") public class StreamController extends NamespacedResourceController { @@ -25,43 +33,48 @@ public class StreamController extends NamespacedResourceController { StreamService streamService; /** - * List Kafka Streams by namespace + * List Kafka Streams by namespace. + * * @param namespace The namespace * @return A list of Kafka Streams */ @Get("/") - List list(String namespace){ + List list(String namespace) { Namespace ns = getNamespace(namespace); return streamService.findAllForNamespace(ns); } /** - * Get a Kafka Streams by namespace and name + * Get a Kafka Streams by namespace and name. + * * @param namespace The name - * @param stream The Kafka Streams name + * @param stream The Kafka Streams name * @return The Kafka Streams */ @Get("/{stream}") - Optional get(String namespace, String stream){ + Optional get(String namespace, String stream) { Namespace ns = getNamespace(namespace); return streamService.findByName(ns, stream); } /** - * Create a Kafka Streams + * Create a Kafka Streams. + * * @param namespace The namespace - * @param stream The Kafka Stream - * @param dryrun Is dry run mode or not ? + * @param stream The Kafka Stream + * @param dryrun Is dry run mode or not ? * @return An HTTP response */ @Post("/{?dryrun}") - HttpResponse apply(String namespace,@Body @Valid KafkaStream stream, @QueryValue(defaultValue = "false") boolean dryrun){ + HttpResponse apply(String namespace, @Body @Valid KafkaStream stream, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); if (!streamService.isNamespaceOwnerOfKafkaStream(ns, stream.getMetadata().getName())) { throw new ResourceValidationException(List.of("Invalid value " + stream.getMetadata().getName() - + " for name: Namespace not OWNER of underlying Topic prefix and Group prefix"), "Stream", stream.getMetadata().getName()); + + " for name: Namespace not OWNER of underlying Topic prefix and Group prefix"), "Stream", + stream.getMetadata().getName()); } // Augment the Stream @@ -71,7 +84,7 @@ HttpResponse apply(String namespace,@Body @Valid KafkaStream stream // Creation of the correct ACLs Optional existingStream = streamService.findByName(ns, stream.getMetadata().getName()); - if (existingStream.isPresent() && existingStream.get().equals(stream)){ + if (existingStream.isPresent() && existingStream.get().equals(stream)) { return formatHttpResponse(stream, ApplyStatus.unchanged); } @@ -82,28 +95,29 @@ HttpResponse apply(String namespace,@Body @Valid KafkaStream stream } sendEventLog(stream.getKind(), - stream.getMetadata(), - status, - null, - null); + stream.getMetadata(), + status, + null, + null); return formatHttpResponse(streamService.create(stream), status); } /** - * Delete a Kafka Streams + * Delete a Kafka Streams. + * * @param namespace The namespace - * @param stream The Kafka Streams - * @param dryrun Is dry run mode or not ? + * @param stream The Kafka Streams + * @param dryrun Is dry run mode or not ? 
* @return An HTTP response */ @Status(HttpStatus.NO_CONTENT) @Delete("/{stream}{?dryrun}") - HttpResponse delete(String namespace,String stream, @QueryValue(defaultValue = "false") boolean dryrun){ + HttpResponse delete(String namespace, String stream, @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); if (!streamService.isNamespaceOwnerOfKafkaStream(ns, stream)) { throw new ResourceValidationException(List.of("Invalid value " + stream - + " for name: Namespace not OWNER of underlying Topic prefix and Group prefix"), "Stream", stream); + + " for name: Namespace not OWNER of underlying Topic prefix and Group prefix"), "Stream", stream); } Optional optionalStream = streamService.findByName(ns, stream); @@ -118,10 +132,10 @@ HttpResponse delete(String namespace,String stream, @QueryValue(defaultVal var streamToDelete = optionalStream.get(); sendEventLog(streamToDelete.getKind(), - streamToDelete.getMetadata(), - ApplyStatus.deleted, - null, - null); + streamToDelete.getMetadata(), + ApplyStatus.deleted, + null, + null); streamService.delete(ns, optionalStream.get()); return HttpResponse.noContent(); } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/UserController.java b/src/main/java/com/michelin/ns4kafka/controllers/UserController.java index 3fd3bfd3..75b29774 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/UserController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/UserController.java @@ -14,11 +14,13 @@ import io.micronaut.inject.qualifiers.Qualifiers; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; - import java.time.Instant; import java.util.Date; import java.util.List; +/** + * Controller to manage users. + */ @Tag(name = "Users", description = "Manage the users.") @Controller(value = "/api/namespaces/{namespace}/users") public class UserController extends NamespacedResourceController { @@ -26,34 +28,38 @@ public class UserController extends NamespacedResourceController { ApplicationContext applicationContext; /** - * Reset a password + * Reset a password. 
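The reset-password hunk that follows resolves its executor per Kafka cluster at runtime: one UserAsyncExecutor bean is registered per configured cluster, and Qualifiers.byName selects the right one by the cluster name carried on the namespace metadata. The lookup in isolation, with names taken from the hunk:

    // Runtime lookup of a named bean: one UserAsyncExecutor per Kafka cluster.
    UserAsyncExecutor userAsyncExecutor = applicationContext.getBean(
        UserAsyncExecutor.class,
        Qualifiers.byName(ns.getMetadata().getCluster()));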
+ * * @param namespace The namespace - * @param user The user + * @param user The user * @return The new password */ @Post("/{user}/reset-password") public HttpResponse resetPassword(String namespace, String user) { Namespace ns = getNamespace(namespace); - if(!ns.getSpec().getKafkaUser().equals(user)){ - throw new ResourceValidationException(List.of(String.format("Invalid user %s : Doesn't belong to namespace %s", user, namespace)), "KafkaUserResetPassword", user); + if (!ns.getSpec().getKafkaUser().equals(user)) { + throw new ResourceValidationException( + List.of(String.format("Invalid user %s : Doesn't belong to namespace %s", user, namespace)), + "KafkaUserResetPassword", user); } - UserAsyncExecutor userAsyncExecutor = applicationContext.getBean(UserAsyncExecutor.class, Qualifiers.byName(ns.getMetadata().getCluster())); + UserAsyncExecutor userAsyncExecutor = + applicationContext.getBean(UserAsyncExecutor.class, Qualifiers.byName(ns.getMetadata().getCluster())); String password = userAsyncExecutor.resetPassword(ns.getSpec().getKafkaUser()); KafkaUserResetPassword response = KafkaUserResetPassword.builder() - .metadata(ObjectMeta.builder() - .name(ns.getSpec().getKafkaUser()) - .namespace(namespace) - .cluster(ns.getMetadata().getCluster()) - .creationTimestamp(Date.from(Instant.now())) - .build()) - .spec(KafkaUserResetPassword.KafkaUserResetPasswordSpec.builder() - .newPassword(password) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name(ns.getSpec().getKafkaUser()) + .namespace(namespace) + .cluster(ns.getMetadata().getCluster()) + .creationTimestamp(Date.from(Instant.now())) + .build()) + .spec(KafkaUserResetPassword.KafkaUserResetPasswordSpec.builder() + .newPassword(password) + .build()) + .build(); sendEventLog("KafkaUserResetPassword", response.getMetadata(), ApplyStatus.changed, null, response.getSpec()); return HttpResponse.ok(response); } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/acl/AclController.java b/src/main/java/com/michelin/ns4kafka/controllers/acl/AclController.java index 55e6f92a..383628a4 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/acl/AclController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/acl/AclController.java @@ -1,20 +1,26 @@ package com.michelin.ns4kafka.controllers.acl; +import static com.michelin.ns4kafka.services.AccessControlEntryService.PUBLIC_GRANTED_TO; + import com.michelin.ns4kafka.controllers.generic.NamespacedResourceController; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import com.michelin.ns4kafka.services.AccessControlEntryService; -import com.michelin.ns4kafka.services.NamespaceService; import com.michelin.ns4kafka.utils.enums.ApplyStatus; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; -import io.micronaut.http.annotation.*; +import io.micronaut.http.annotation.Body; +import io.micronaut.http.annotation.Controller; +import io.micronaut.http.annotation.Delete; +import io.micronaut.http.annotation.Get; +import io.micronaut.http.annotation.Post; +import io.micronaut.http.annotation.QueryValue; +import io.micronaut.http.annotation.Status; import io.micronaut.security.authentication.Authentication; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; - import jakarta.validation.Valid; import java.time.Instant; import 
java.util.Comparator; @@ -22,21 +28,20 @@ import java.util.List; import java.util.Optional; -import static com.michelin.ns4kafka.services.AccessControlEntryService.PUBLIC_GRANTED_TO; - +/** + * Controller to manage ACLs. + */ @Tag(name = "ACLs", description = "Manage the ACLs.") @Controller("/api/namespaces/{namespace}/acls") public class AclController extends NamespacedResourceController { - @Inject - NamespaceService namespaceService; - @Inject AccessControlEntryService accessControlEntryService; /** - * List ACLs by namespace + * List ACLs by namespace. + * * @param namespace The namespace - * @param limit The ACL scope + * @param limit The ACL scope * @return A list of ACLs */ @Get("{?limit}") @@ -46,67 +51,66 @@ public List list(String namespace, Optional limit) } Namespace ns = getNamespace(namespace); - switch (limit.get()) { - case GRANTEE: - return accessControlEntryService.findAllGrantedToNamespace(ns) - .stream() - .sorted(Comparator.comparing(o -> o.getMetadata().getNamespace())) - .toList(); - case GRANTOR: - return accessControlEntryService.findAllForCluster(ns.getMetadata().getCluster()) - .stream() - // granted by me - .filter(accessControlEntry -> accessControlEntry.getMetadata().getNamespace().equals(namespace)) - // without the granted to me - .filter(accessControlEntry -> !accessControlEntry.getSpec().getGrantedTo().equals(namespace)) - .sorted(Comparator.comparing(o -> o.getSpec().getGrantedTo())) - .toList(); - case ALL: - default: - return accessControlEntryService.findAllForCluster(ns.getMetadata().getCluster()) - .stream() - .filter(accessControlEntry -> - accessControlEntry.getMetadata().getNamespace().equals(namespace) - || accessControlEntry.getSpec().getGrantedTo().equals(namespace) - || accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO)) - .sorted(Comparator.comparing(o -> o.getMetadata().getNamespace())) - .toList(); - } + return switch (limit.get()) { + case GRANTEE -> accessControlEntryService.findAllGrantedToNamespace(ns) + .stream() + .sorted(Comparator.comparing(o -> o.getMetadata().getNamespace())) + .toList(); + case GRANTOR -> accessControlEntryService.findAllForCluster(ns.getMetadata().getCluster()) + .stream() + // granted by me + .filter(accessControlEntry -> accessControlEntry.getMetadata().getNamespace().equals(namespace)) + // without the granted to me + .filter(accessControlEntry -> !accessControlEntry.getSpec().getGrantedTo().equals(namespace)) + .sorted(Comparator.comparing(o -> o.getSpec().getGrantedTo())) + .toList(); + default -> accessControlEntryService.findAllForCluster(ns.getMetadata().getCluster()) + .stream() + .filter(accessControlEntry -> + accessControlEntry.getMetadata().getNamespace().equals(namespace) + || accessControlEntry.getSpec().getGrantedTo().equals(namespace) + || accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO)) + .sorted(Comparator.comparing(o -> o.getMetadata().getNamespace())) + .toList(); + }; } /** - * Get an ACL by namespace and name + * Get an ACL by namespace and name. 
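One subtlety in the list() rewrite above: the statement form spelled out case ALL: default:, while the expression form keeps only default ->. That is equivalent here because a switch expression must be exhaustive, so the default arm absorbs ALL and the compiler would reject any uncovered constant. In miniature:

    // ALL falls into the default arm; exhaustiveness is compiler-checked.
    enum AclLimit { ALL, GRANTOR, GRANTEE }

    static String scope(AclLimit limit) {
        return switch (limit) {
            case GRANTEE -> "granted to me";
            case GRANTOR -> "granted by me";
            default -> "all";
        };
    }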
+ * * @param namespace The name - * @param acl The ACL name + * @param acl The ACL name * @return The ACL */ @Get("/{acl}") public Optional get(String namespace, String acl) { return list(namespace, Optional.of(AclLimit.ALL)) - .stream() - .filter(accessControlEntry -> accessControlEntry.getMetadata().getName().equals(acl)) - .findFirst(); + .stream() + .filter(accessControlEntry -> accessControlEntry.getMetadata().getName().equals(acl)) + .findFirst(); } /** - * Create an ACL - * @param authentication The authentication entity - * @param namespace The namespace + * Create an ACL. + * + * @param authentication The authentication entity + * @param namespace The namespace * @param accessControlEntry The ACL - * @param dryrun Is dry run mode or not ? + * @param dryrun Is dry run mode or not ? * @return An HTTP response */ @Post("{?dryrun}") - public HttpResponse apply(Authentication authentication, String namespace, @Valid @Body AccessControlEntry accessControlEntry, @QueryValue(defaultValue = "false") boolean dryrun) { + public HttpResponse apply(Authentication authentication, String namespace, + @Valid @Body AccessControlEntry accessControlEntry, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); List roles = (List) authentication.getAttributes().get("roles"); boolean isAdmin = roles.contains(ResourceBasedSecurityRule.IS_ADMIN); - // Self-assigned ACL (spec.grantedTo == metadata.namespace) - boolean isSelfAssignedACL = namespace.equals(accessControlEntry.getSpec().getGrantedTo()); + boolean isSelfAssignedAcl = namespace.equals(accessControlEntry.getSpec().getGrantedTo()); List validationErrors; - if (isAdmin && isSelfAssignedACL) { + if (isAdmin && isSelfAssignedAcl) { // Validate overlapping OWNER validationErrors = accessControlEntryService.validateAsAdmin(accessControlEntry, ns); } else { @@ -114,25 +118,29 @@ public HttpResponse apply(Authentication authentication, Str } if (!validationErrors.isEmpty()) { - throw new ResourceValidationException(validationErrors, accessControlEntry.getKind(), accessControlEntry.getMetadata().getName()); + throw new ResourceValidationException(validationErrors, accessControlEntry.getKind(), + accessControlEntry.getMetadata().getName()); } // AccessControlEntry spec is immutable // This prevents accidental updates on ACL resources already declared with the same name (with different rules) - Optional existingACL = accessControlEntryService.findByName(namespace, accessControlEntry.getMetadata().getName()); - if(existingACL.isPresent() && !existingACL.get().getSpec().equals(accessControlEntry.getSpec())){ - throw new ResourceValidationException(List.of("Invalid modification: `spec` is immutable. You can still update `metadata`"), accessControlEntry.getKind(), accessControlEntry.getMetadata().getName()); + Optional existingAcl = + accessControlEntryService.findByName(namespace, accessControlEntry.getMetadata().getName()); + if (existingAcl.isPresent() && !existingAcl.get().getSpec().equals(accessControlEntry.getSpec())) { + throw new ResourceValidationException( + List.of("Invalid modification: `spec` is immutable. 
You can still update `metadata`"), + accessControlEntry.getKind(), accessControlEntry.getMetadata().getName()); } accessControlEntry.getMetadata().setCreationTimestamp(Date.from(Instant.now())); accessControlEntry.getMetadata().setCluster(ns.getMetadata().getCluster()); accessControlEntry.getMetadata().setNamespace(ns.getMetadata().getName()); - if (existingACL.isPresent() && existingACL.get().equals(accessControlEntry)) { - return formatHttpResponse(existingACL.get(), ApplyStatus.unchanged); + if (existingAcl.isPresent() && existingAcl.get().equals(accessControlEntry)) { + return formatHttpResponse(existingAcl.get(), ApplyStatus.unchanged); } - ApplyStatus status = existingACL.isPresent() ? ApplyStatus.changed : ApplyStatus.created; + ApplyStatus status = existingAcl.isPresent() ? ApplyStatus.changed : ApplyStatus.created; // Dry run checks if (dryrun) { @@ -140,55 +148,59 @@ public HttpResponse apply(Authentication authentication, Str } sendEventLog(accessControlEntry.getKind(), - accessControlEntry.getMetadata(), - status, - existingACL.map(AccessControlEntry::getSpec).orElse(null), - accessControlEntry.getSpec()); + accessControlEntry.getMetadata(), + status, + existingAcl.map(AccessControlEntry::getSpec).orElse(null), + accessControlEntry.getSpec()); // Store return formatHttpResponse(accessControlEntryService.create(accessControlEntry), status); } /** - * Delete an ACL + * Delete an ACL. + * * @param authentication The authentication entity - * @param namespace The namespace - * @param name The ACL name - * @param dryrun Is dry run mode or not ? + * @param namespace The namespace + * @param name The ACL name + * @param dryrun Whether in dry run mode or not * @return An HTTP response */ @Delete("/{name}{?dryrun}") @Status(HttpStatus.NO_CONTENT) - public HttpResponse delete(Authentication authentication, String namespace, String name, @QueryValue(defaultValue = "false") boolean dryrun) { - Namespace ns = getNamespace(namespace); + public HttpResponse delete(Authentication authentication, String namespace, String name, + @QueryValue(defaultValue = "false") boolean dryrun) { AccessControlEntry accessControlEntry = accessControlEntryService - .findByName(namespace, name) - .orElseThrow(() -> new ResourceValidationException( - List.of("Invalid value " + name + " for name: ACL does not exist in this namespace."), - "AccessControlEntry", name)); + .findByName(namespace, name) + .orElseThrow(() -> new ResourceValidationException( + List.of("Invalid value " + name + " for name: ACL does not exist in this namespace."), + "AccessControlEntry", name)); List roles = (List) authentication.getAttributes().get("roles"); boolean isAdmin = roles.contains(ResourceBasedSecurityRule.IS_ADMIN); - // Self-assigned ACL (spec.grantedTo == metadata.namespace) - boolean isSelfAssignedACL = namespace.equals(accessControlEntry.getSpec().getGrantedTo()); + boolean isSelfAssignedAcl = namespace.equals(accessControlEntry.getSpec().getGrantedTo()); - if (isSelfAssignedACL && !isAdmin) { - // Prevent delete - throw new ResourceValidationException(List.of("Only admins can delete this ACL."), "AccessControlEntry", name); + if (isSelfAssignedAcl && !isAdmin) { + throw new ResourceValidationException(List.of("Only admins can delete this ACL."), "AccessControlEntry", + name); } if (dryrun) { return HttpResponse.noContent(); } - sendEventLog(accessControlEntry.getKind(), accessControlEntry.getMetadata(), ApplyStatus.deleted,accessControlEntry.getSpec(), null); - accessControlEntryService.delete(ns, accessControlEntry); +
sendEventLog(accessControlEntry.getKind(), accessControlEntry.getMetadata(), ApplyStatus.deleted, + accessControlEntry.getSpec(), null); + accessControlEntryService.delete(getNamespace(namespace), accessControlEntry); return HttpResponse.noContent(); } + /** + * ACL scope. + */ public enum AclLimit { /** - * Returns all ACL + * Returns all ACL scopes. */ ALL, GRANTOR, diff --git a/src/main/java/com/michelin/ns4kafka/controllers/acl/AclNonNamespacedController.java b/src/main/java/com/michelin/ns4kafka/controllers/acl/AclNonNamespacedController.java index 44bedc8e..bda6a5b9 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/acl/AclNonNamespacedController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/acl/AclNonNamespacedController.java @@ -7,11 +7,13 @@ import io.micronaut.http.annotation.Controller; import io.micronaut.http.annotation.Get; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.inject.Inject; - import jakarta.annotation.security.RolesAllowed; +import jakarta.inject.Inject; import java.util.List; +/** + * Non-namespaced controller to manage ACLs. + */ @Tag(name = "ACLs", description = "Manage the ACLs.") @Controller("/api/acls") @RolesAllowed(ResourceBasedSecurityRule.IS_ADMIN) @@ -20,7 +22,8 @@ public class AclNonNamespacedController extends NonNamespacedResourceController AccessControlEntryService accessControlEntryService; /** - * List ACLs + * List ACLs. + * * @return A list of ACLs */ @Get diff --git a/src/main/java/com/michelin/ns4kafka/controllers/connect/ConnectClusterController.java b/src/main/java/com/michelin/ns4kafka/controllers/connect/ConnectClusterController.java index 51c88b3b..948c1182 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/connect/ConnectClusterController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/connect/ConnectClusterController.java @@ -12,13 +12,17 @@ import io.micronaut.core.util.StringUtils; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; -import io.micronaut.http.annotation.*; +import io.micronaut.http.annotation.Body; +import io.micronaut.http.annotation.Controller; +import io.micronaut.http.annotation.Delete; +import io.micronaut.http.annotation.Get; +import io.micronaut.http.annotation.Post; +import io.micronaut.http.annotation.QueryValue; +import io.micronaut.http.annotation.Status; import io.micronaut.scheduling.TaskExecutors; import io.micronaut.scheduling.annotation.ExecuteOn; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; -import reactor.core.publisher.Mono; - import jakarta.validation.Valid; import java.time.Instant; import java.util.ArrayList; @@ -26,7 +30,11 @@ import java.util.List; import java.util.Optional; import java.util.stream.Collectors; +import reactor.core.publisher.Mono; +/** + * Controller to manage Kafka Connect clusters. + */ @Tag(name = "Connect Clusters", description = "Manage the Kafka Connect clusters.") @Controller(value = "/api/namespaces/{namespace}/connect-clusters") @ExecuteOn(TaskExecutors.IO) @@ -38,7 +46,8 @@ public class ConnectClusterController extends NamespacedResourceController { ConnectorService connectorService; /** - * List Kafka Connect clusters by namespace + * List Kafka Connect clusters by namespace. + * * @param namespace The namespace * @return A list of Kafka Connect clusters */ @@ -48,7 +57,8 @@ public List list(String namespace) { } /** - * Get a Kafka Connect clusters by namespace and name + * Get a Kafka Connect cluster by namespace and name.
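The apply() flow completed above follows a convention that recurs across every controller in this diff: compare the incoming resource against the stored one, answer unchanged without persisting, otherwise persist and answer changed or created. A minimal sketch of that decision, using hypothetical Resource and in-memory store types:

```java
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

public class ApplyStatusSketch {
    enum ApplyStatus { created, changed, unchanged }

    // Hypothetical resource: name is the identity, spec is the payload.
    record Resource(String name, String spec) {}

    static final Map<String, Resource> store = new ConcurrentHashMap<>();

    static ApplyStatus apply(Resource incoming) {
        Optional<Resource> existing = Optional.ofNullable(store.get(incoming.name()));
        if (existing.isPresent() && existing.get().equals(incoming)) {
            return ApplyStatus.unchanged; // nothing to persist
        }
        ApplyStatus status = existing.isPresent() ? ApplyStatus.changed : ApplyStatus.created;
        store.put(incoming.name(), incoming);
        return status;
    }

    public static void main(String[] args) {
        System.out.println(apply(new Resource("acl-1", "read")));  // created
        System.out.println(apply(new Resource("acl-1", "read")));  // unchanged
        System.out.println(apply(new Resource("acl-1", "write"))); // changed
    }
}
```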
+ * * @param namespace The namespace * @param connectCluster The name * @return A Kafka Connect cluster @@ -59,51 +69,58 @@ public Optional getConnectCluster(String namespace, String conne } /** - * Create a Kafka Connect cluster + * Create a Kafka Connect cluster. + * * @param namespace The namespace * @param connectCluster The connect worker * @param dryrun Does the creation is a dry run * @return The created Kafka Connect cluster */ @Post("/{?dryrun}") - public Mono> apply(String namespace, @Body @Valid ConnectCluster connectCluster, @QueryValue(defaultValue = "false") boolean dryrun) { + public Mono> apply(String namespace, @Body @Valid ConnectCluster connectCluster, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); List validationErrors = new ArrayList<>(); if (!connectClusterService.isNamespaceOwnerOfConnectCluster(ns, connectCluster.getMetadata().getName())) { - validationErrors.add(String.format("Namespace not owner of this Connect cluster %s.", connectCluster.getMetadata().getName())); + validationErrors.add(String.format("Namespace not owner of this Connect cluster %s.", + connectCluster.getMetadata().getName())); } return connectClusterService.validateConnectClusterCreation(connectCluster) - .flatMap(errors -> { - validationErrors.addAll(errors); - if (!validationErrors.isEmpty()) { - return Mono.error(new ResourceValidationException(validationErrors, connectCluster.getKind(), connectCluster.getMetadata().getName())); - } - - connectCluster.getMetadata().setCreationTimestamp(Date.from(Instant.now())); - connectCluster.getMetadata().setCluster(ns.getMetadata().getCluster()); - connectCluster.getMetadata().setNamespace(ns.getMetadata().getName()); - - Optional existingConnectCluster = connectClusterService.findByNamespaceAndNameOwner(ns, connectCluster.getMetadata().getName()); - if (existingConnectCluster.isPresent() && existingConnectCluster.get().equals(connectCluster)) { - return Mono.just(formatHttpResponse(existingConnectCluster.get(), ApplyStatus.unchanged)); - } - - ApplyStatus status = existingConnectCluster.isPresent() ? ApplyStatus.changed : ApplyStatus.created; - if (dryrun) { - return Mono.just(formatHttpResponse(connectCluster, status)); - } - - sendEventLog(connectCluster.getKind(), connectCluster.getMetadata(), status, existingConnectCluster.map(ConnectCluster::getSpec).orElse(null), - connectCluster.getSpec()); - - return Mono.just(formatHttpResponse(connectClusterService.create(connectCluster), status)); - }); + .flatMap(errors -> { + validationErrors.addAll(errors); + if (!validationErrors.isEmpty()) { + return Mono.error(new ResourceValidationException(validationErrors, connectCluster.getKind(), + connectCluster.getMetadata().getName())); + } + + connectCluster.getMetadata().setCreationTimestamp(Date.from(Instant.now())); + connectCluster.getMetadata().setCluster(ns.getMetadata().getCluster()); + connectCluster.getMetadata().setNamespace(ns.getMetadata().getName()); + + Optional existingConnectCluster = + connectClusterService.findByNamespaceAndNameOwner(ns, connectCluster.getMetadata().getName()); + if (existingConnectCluster.isPresent() && existingConnectCluster.get().equals(connectCluster)) { + return Mono.just(formatHttpResponse(existingConnectCluster.get(), ApplyStatus.unchanged)); + } + + ApplyStatus status = existingConnectCluster.isPresent() ? 
ApplyStatus.changed : ApplyStatus.created; + if (dryrun) { + return Mono.just(formatHttpResponse(connectCluster, status)); + } + + sendEventLog(connectCluster.getKind(), connectCluster.getMetadata(), status, + existingConnectCluster.map(ConnectCluster::getSpec).orElse(null), + connectCluster.getSpec()); + + return Mono.just(formatHttpResponse(connectClusterService.create(connectCluster), status)); + }); } /** - * Delete a Kafka Connect cluster + * Delete a Kafka Connect cluster. + * * @param namespace The current namespace * @param connectCluster The current connect cluster name to delete * @param dryrun Run in dry mode or not @@ -111,7 +128,8 @@ public Mono> apply(String namespace, @Body @Valid C */ @Status(HttpStatus.NO_CONTENT) @Delete("/{connectCluster}{?dryrun}") - public HttpResponse delete(String namespace, String connectCluster, @QueryValue(defaultValue = "false") boolean dryrun) { + public HttpResponse delete(String namespace, String connectCluster, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); List validationErrors = new ArrayList<>(); @@ -121,15 +139,20 @@ public HttpResponse delete(String namespace, String connectCluster, @Query List connectors = connectorService.findAllByConnectCluster(ns, connectCluster); if (!connectors.isEmpty()) { - validationErrors.add(String.format("The Connect cluster %s has %s deployed connector(s): %s. Please remove the associated connector(s) before deleting it.", connectCluster, connectors.size(), - connectors.stream().map(connector -> connector.getMetadata().getName()).collect(Collectors.joining(", ")))); + validationErrors.add(String.format( + "The Connect cluster %s has %s deployed connector(s): %s. " + + "Please remove the associated connector(s) before deleting it.", + connectCluster, connectors.size(), connectors.stream().map(connector -> + connector.getMetadata().getName()) + .collect(Collectors.joining(", ")))); } if (!validationErrors.isEmpty()) { throw new ResourceValidationException(validationErrors, "ConnectCluster", connectCluster); } - Optional optionalConnectCluster = connectClusterService.findByNamespaceAndNameOwner(ns, connectCluster); + Optional optionalConnectCluster = + connectClusterService.findByNamespaceAndNameOwner(ns, connectCluster); if (optionalConnectCluster.isEmpty()) { return HttpResponse.notFound(); } @@ -139,39 +162,44 @@ public HttpResponse delete(String namespace, String connectCluster, @Query } ConnectCluster connectClusterToDelete = optionalConnectCluster.get(); - sendEventLog(connectClusterToDelete.getKind(), connectClusterToDelete.getMetadata(), ApplyStatus.deleted, connectClusterToDelete.getSpec(), null); + sendEventLog(connectClusterToDelete.getKind(), connectClusterToDelete.getMetadata(), ApplyStatus.deleted, + connectClusterToDelete.getSpec(), null); connectClusterService.delete(connectClusterToDelete); return HttpResponse.noContent(); } /** - * List vault Kafka Connect clusters by namespace + * List vault Kafka Connect clusters by namespace. 
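ConnectClusterController.apply() above runs its validation inside flatMap and short-circuits with Mono.error instead of throwing, so failures travel through the reactive pipeline. A minimal Reactor sketch of that shape (reactor-core on the classpath is assumed; method and error-message names are hypothetical):

```java
import java.util.List;
import reactor.core.publisher.Mono;

public class ReactiveValidationSketch {
    // Pretend async validation, in the spirit of validateConnectClusterCreation.
    static Mono<List<String>> validate(String name) {
        return Mono.just(name.isBlank() ? List.of("Name must not be blank.") : List.of());
    }

    static Mono<String> apply(String name) {
        return validate(name).flatMap(errors -> {
            if (!errors.isEmpty()) {
                // Signal failure through the pipeline instead of throwing.
                return Mono.error(new IllegalArgumentException(String.join(" ", errors)));
            }
            return Mono.just("created " + name);
        });
    }

    public static void main(String[] args) {
        System.out.println(apply("my-connect-cluster").block()); // created my-connect-cluster
        apply("").subscribe(ok -> {}, err -> System.out.println("rejected: " + err.getMessage()));
    }
}
```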
+ * * @return A list of the available vault Kafka Connect clusters */ @Get("/_/vaults") public List listVaults(final String namespace) { final Namespace ns = getNamespace(namespace); return connectClusterService.findAllByNamespaceWrite(ns) - .stream() - .filter(connectCluster -> StringUtils.hasText(connectCluster.getSpec().getAes256Key())) - .toList(); + .stream() + .filter(connectCluster -> StringUtils.hasText(connectCluster.getSpec().getAes256Key())) + .toList(); } /** - * Encrypt a list of passwords + * Encrypt a list of passwords. + * * @param namespace The namespace. * @param connectCluster The name of the Kafka Connect cluster. * @param passwords The passwords to encrypt. * @return The encrypted password. */ @Post("/{connectCluster}/vaults") - public List vaultPassword(final String namespace, final String connectCluster, @Body final List passwords) { + public List vaultPassword(final String namespace, final String connectCluster, + @Body final List passwords) { final Namespace ns = getNamespace(namespace); final var validationErrors = new ArrayList(); if (!connectClusterService.isNamespaceAllowedForConnectCluster(ns, connectCluster)) { - validationErrors.add(String.format("Namespace is not allowed to use this Connect cluster %s.", connectCluster)); + validationErrors.add( + String.format("Namespace is not allowed to use this Connect cluster %s.", connectCluster)); } validationErrors.addAll(connectClusterService.validateConnectClusterVault(ns, connectCluster)); diff --git a/src/main/java/com/michelin/ns4kafka/controllers/connect/ConnectClusterNonNamespacedController.java b/src/main/java/com/michelin/ns4kafka/controllers/connect/ConnectClusterNonNamespacedController.java index 58c14d67..75c7ba29 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/connect/ConnectClusterNonNamespacedController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/connect/ConnectClusterNonNamespacedController.java @@ -10,11 +10,13 @@ import io.micronaut.scheduling.TaskExecutors; import io.micronaut.scheduling.annotation.ExecuteOn; import io.swagger.v3.oas.annotations.tags.Tag; +import jakarta.annotation.security.RolesAllowed; import jakarta.inject.Inject; import reactor.core.publisher.Flux; -import jakarta.annotation.security.RolesAllowed; - +/** + * Non-namespaced controller to manage Kafka Connect clusters. + */ @Tag(name = "Connect Clusters", description = "Manage the Kafka Connect clusters.") @Controller(value = "/api/connect-clusters") @ExecuteOn(TaskExecutors.IO) @@ -24,7 +26,8 @@ public class ConnectClusterNonNamespacedController extends NonNamespacedResource ConnectClusterService connectClusterService; /** - * List Kafka Connect clusters + * List Kafka Connect clusters. + * * @return A list of Kafka Connect clusters */ @Get("{?all}") diff --git a/src/main/java/com/michelin/ns4kafka/controllers/generic/NamespacedResourceController.java b/src/main/java/com/michelin/ns4kafka/controllers/generic/NamespacedResourceController.java index 2bdfea16..32063801 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/generic/NamespacedResourceController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/generic/NamespacedResourceController.java @@ -4,17 +4,20 @@ import com.michelin.ns4kafka.services.NamespaceService; import jakarta.inject.Inject; +/** + * Namespaced resource controller. 
+ */ public abstract class NamespacedResourceController extends ResourceController { @Inject private NamespaceService namespaceService; /** * Call this to get the Namespace associated with the current request. + * * @param namespace the namespace String * @return the Namespace associated with the current request. - * @exception java.util.NoSuchElementException if the namespace does not exist */ - public Namespace getNamespace(String namespace){ + public Namespace getNamespace(String namespace) { return namespaceService.findByName(namespace).orElseThrow(); } } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/generic/NonNamespacedResourceController.java b/src/main/java/com/michelin/ns4kafka/controllers/generic/NonNamespacedResourceController.java index 919c07ba..c3cb2fd9 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/generic/NonNamespacedResourceController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/generic/NonNamespacedResourceController.java @@ -1,7 +1,7 @@ package com.michelin.ns4kafka.controllers.generic; /** - * Base Controller for all NonNamespaced resources + * Non-namespaced resource controller. */ public abstract class NonNamespacedResourceController extends ResourceController { diff --git a/src/main/java/com/michelin/ns4kafka/controllers/generic/ResourceController.java b/src/main/java/com/michelin/ns4kafka/controllers/generic/ResourceController.java index e08ef47a..4b4f7713 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/generic/ResourceController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/generic/ResourceController.java @@ -8,10 +8,12 @@ import io.micronaut.http.HttpResponse; import io.micronaut.security.utils.SecurityService; import jakarta.inject.Inject; - import java.time.Instant; import java.util.Date; +/** + * Resource controller. + */ public abstract class ResourceController { private static final String STATUS_HEADER = "X-Ns4kafka-Result"; @@ -25,10 +27,19 @@ public HttpResponse formatHttpResponse(T body, ApplyStatus status) { return HttpResponse.ok(body).header(STATUS_HEADER, status.toString()); } + /** + * Send an audit log event.
+ * + * @param kind the kind of resource + * @param metadata the metadata of the resource + * @param operation the operation + * @param before the resource before the operation + * @param after the resource after the operation + */ public void sendEventLog(String kind, ObjectMeta metadata, ApplyStatus operation, Object before, Object after) { AuditLog auditLog = new AuditLog(securityService.username().orElse(""), - securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN), Date.from(Instant.now()), - kind, metadata, operation, before, after); + securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN), Date.from(Instant.now()), + kind, metadata, operation, before, after); applicationEventPublisher.publishEvent(auditLog); } } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/quota/ResourceQuotaController.java b/src/main/java/com/michelin/ns4kafka/controllers/quota/ResourceQuotaController.java index 640cd44c..f6e5848a 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/quota/ResourceQuotaController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/quota/ResourceQuotaController.java @@ -9,18 +9,26 @@ import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; -import io.micronaut.http.annotation.*; +import io.micronaut.http.annotation.Body; +import io.micronaut.http.annotation.Controller; +import io.micronaut.http.annotation.Delete; +import io.micronaut.http.annotation.Get; +import io.micronaut.http.annotation.Post; +import io.micronaut.http.annotation.QueryValue; +import io.micronaut.http.annotation.Status; import io.micronaut.scheduling.TaskExecutors; import io.micronaut.scheduling.annotation.ExecuteOn; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; - import jakarta.validation.Valid; import java.time.Instant; import java.util.Date; import java.util.List; import java.util.Optional; +/** + * Resource quota controller. + */ @Tag(name = "Quotas", description = "Manage the resource quotas.") @Controller(value = "/api/namespaces/{namespace}/resource-quotas") @ExecuteOn(TaskExecutors.IO) @@ -29,7 +37,8 @@ public class ResourceQuotaController extends NamespacedResourceController { ResourceQuotaService resourceQuotaService; /** - * List quotas by namespace + * List quotas by namespace. + * * @param namespace The namespace * @return A list of quotas */ @@ -37,13 +46,14 @@ public class ResourceQuotaController extends NamespacedResourceController { public List list(String namespace) { Namespace ns = getNamespace(namespace); return List.of(resourceQuotaService.getUsedResourcesByQuotaByNamespace(ns, - resourceQuotaService.findByNamespace(namespace))); + resourceQuotaService.findByNamespace(namespace))); } /** - * Get a quota by namespace and name + * Get a quota by namespace and name. + * * @param namespace The name - * @param quota The quota name + * @param quota The quota name * @return A quota */ @Get("/{quota}") @@ -57,14 +67,16 @@ public Optional get(String namespace, String quota) { } /** - * Create a quota + * Create a quota. 
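The sendEventLog() method documented above publishes an AuditLog through Micronaut's ApplicationEventPublisher; the listeners later in this diff (ConsoleLogListener, KafkaLogListener) consume it without the controllers knowing they exist. A framework-free sketch of that publish/subscribe decoupling, with hypothetical types:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class EventLogSketch {
    record AuditEvent(String user, String operation, String resource) {}

    // Stand-in for ApplicationEventPublisher: fan out to registered listeners.
    static final List<Consumer<AuditEvent>> listeners = new ArrayList<>();

    static void publish(AuditEvent event) {
        listeners.forEach(listener -> listener.accept(event));
    }

    public static void main(String[] args) {
        // A console listener, in the spirit of ConsoleLogListener.
        listeners.add(e -> System.out.printf("%s %s %s%n", e.user(), e.operation(), e.resource()));
        // Publishers only publish; they never know who listens.
        publish(new AuditEvent("admin", "deleted", "topic my-topic"));
    }
}
```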
+ * * @param namespace The namespace - * @param quota The resource quota - * @param dryrun Does the creation is a dry run + * @param quota The resource quota + * @param dryrun Whether the creation is a dry run * @return The created quota */ @Post("{?dryrun}") - public HttpResponse apply(String namespace, @Body @Valid ResourceQuota quota, @QueryValue(defaultValue = "false") boolean dryrun){ + public HttpResponse apply(String namespace, @Body @Valid ResourceQuota quota, + @QueryValue(defaultValue = "false") boolean dryrun) { Namespace ns = getNamespace(namespace); quota.getMetadata().setCreationTimestamp(Date.from(Instant.now())); @@ -87,21 +99,23 @@ public HttpResponse apply(String namespace, @Body @Valid Resource } sendEventLog(quota.getKind(), quota.getMetadata(), status, - resourceQuotaOptional.map(ResourceQuota::getSpec).orElse(null), quota.getSpec()); + resourceQuotaOptional.map(ResourceQuota::getSpec).orElse(null), quota.getSpec()); return formatHttpResponse(resourceQuotaService.create(quota), status); } /** - * Delete a quota + * Delete a quota. + * * @param namespace The namespace - * @param name The resource quota - * @param dryrun Is dry run mode or not ? + * @param name The resource quota + * @param dryrun Whether in dry run mode or not * @return An HTTP response */ @Delete("/{name}{?dryrun}") @Status(HttpStatus.NO_CONTENT) - public HttpResponse delete(String namespace, String name, @QueryValue(defaultValue = "false") boolean dryrun) { + public HttpResponse delete(String namespace, String name, + @QueryValue(defaultValue = "false") boolean dryrun) { Optional resourceQuota = resourceQuotaService.findByName(namespace, name); if (resourceQuota.isEmpty()) { return HttpResponse.notFound(); @@ -112,8 +126,8 @@ public HttpResponse delete(String namespace, String name, @QueryValue(defa } ResourceQuota resourceQuotaToDelete = resourceQuota.get(); - sendEventLog(resourceQuotaToDelete .getKind(), resourceQuotaToDelete.getMetadata(), ApplyStatus.deleted, - resourceQuotaToDelete.getSpec(), null); + sendEventLog(resourceQuotaToDelete.getKind(), resourceQuotaToDelete.getMetadata(), ApplyStatus.deleted, + resourceQuotaToDelete.getSpec(), null); resourceQuotaService.delete(resourceQuotaToDelete); return HttpResponse.noContent(); } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/quota/ResourceQuotaNonNamespacedController.java b/src/main/java/com/michelin/ns4kafka/controllers/quota/ResourceQuotaNonNamespacedController.java index 24a57906..18733563 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/quota/ResourceQuotaNonNamespacedController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/quota/ResourceQuotaNonNamespacedController.java @@ -3,15 +3,18 @@ import com.michelin.ns4kafka.controllers.generic.NonNamespacedResourceController; import com.michelin.ns4kafka.models.quota.ResourceQuotaResponse; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; +import com.michelin.ns4kafka.services.NamespaceService; import com.michelin.ns4kafka.services.ResourceQuotaService; import io.micronaut.http.annotation.Controller; import io.micronaut.http.annotation.Get; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.inject.Inject; - import jakarta.annotation.security.RolesAllowed; +import jakarta.inject.Inject; import java.util.List; +/** + * Non-namespaced resource quota controller.
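The quota endpoints above take the same ?dryrun flag as nearly every mutating endpoint in this diff: bound via @QueryValue(defaultValue = "false"), it runs the full validation path and returns the would-be result without persisting. A minimal sketch of the convention (micronaut-http is assumed on the classpath; the route and class name are hypothetical):

```java
import io.micronaut.http.HttpResponse;
import io.micronaut.http.annotation.Controller;
import io.micronaut.http.annotation.Post;
import io.micronaut.http.annotation.QueryValue;

@Controller("/api/demo")
public class DryRunSketchController {

    @Post("{?dryrun}")
    public HttpResponse<String> apply(@QueryValue(defaultValue = "false") boolean dryrun) {
        // Validation happens here, in both modes.
        if (dryrun) {
            return HttpResponse.ok("would be created"); // nothing persisted
        }
        return HttpResponse.ok("created"); // the real write happens past this point
    }
}
```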
+ */ @Tag(name = "Quotas", description = "Manage the resource quotas.") @Controller(value = "/api/resource-quotas") @RolesAllowed(ResourceBasedSecurityRule.IS_ADMIN) @@ -19,12 +22,16 @@ public class ResourceQuotaNonNamespacedController extends NonNamespacedResourceC @Inject ResourceQuotaService resourceQuotaService; + @Inject + NamespaceService namespaceService; + /** - * List quotas + * List quotas. + * * @return A list of quotas */ @Get public List listAll() { - return resourceQuotaService.getUsedResourcesByQuotaForAllNamespaces(); + return resourceQuotaService.getUsedQuotaByNamespaces(namespaceService.listAll()); } } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java index 5dd36aff..16b5876d 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicController.java @@ -10,18 +10,30 @@ import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; -import io.micronaut.http.annotation.*; +import io.micronaut.http.annotation.Body; +import io.micronaut.http.annotation.Controller; +import io.micronaut.http.annotation.Delete; +import io.micronaut.http.annotation.Get; +import io.micronaut.http.annotation.Post; +import io.micronaut.http.annotation.QueryValue; +import io.micronaut.http.annotation.Status; import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.inject.Inject; -import org.apache.kafka.common.TopicPartition; - import jakarta.validation.Valid; import java.time.Instant; -import java.util.*; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; +import org.apache.kafka.common.TopicPartition; +/** + * Controller to manage topics. + */ @Tag(name = "Topics", description = "Manage the topics.") @Controller(value = "/api/namespaces/{namespace}/topics") public class TopicController extends NamespacedResourceController { @@ -32,7 +44,8 @@ public class TopicController extends NamespacedResourceController { ResourceQuotaService resourceQuotaService; /** - * List topics by namespace + * List topics by namespace. + * * @param namespace The namespace * @return A list of topics */ @@ -43,9 +56,10 @@ public List list(String namespace) { } /** - * Get a topic by namespace and name + * Get a topic by namespace and name. + * * @param namespace The name - * @param topic The topic name + * @param topic The topic name * @return The topic */ @Get("/{topic}") @@ -55,34 +69,40 @@ public Optional getTopic(String namespace, String topic) { } /** - * Create a topic + * Create a topic. + * * @param namespace The namespace - * @param topic The topic - * @param dryrun Is dry run mode or not ? + * @param topic The topic + * @param dryrun Whether in dry run mode or not
* @return The created topic */ @Post - public HttpResponse apply(String namespace, @Valid @Body Topic topic, @QueryValue(defaultValue = "false") boolean dryrun) throws InterruptedException, ExecutionException, TimeoutException { + public HttpResponse apply(String namespace, @Valid @Body Topic topic, + @QueryValue(defaultValue = "false") boolean dryrun) + throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = getNamespace(namespace); Optional existingTopic = topicService.findByName(ns, topic.getMetadata().getName()); // Request is valid ? - List validationErrors = ns.getSpec().getTopicValidator() != null ? ns.getSpec().getTopicValidator().validate(topic) + List validationErrors = + ns.getSpec().getTopicValidator() != null ? ns.getSpec().getTopicValidator().validate(topic) : new ArrayList<>(); if (existingTopic.isEmpty()) { // Topic namespace ownership validation if (!topicService.isNamespaceOwnerOfTopic(namespace, topic.getMetadata().getName())) { - validationErrors.add(String.format("Namespace not owner of this topic %s.", topic.getMetadata().getName())); + validationErrors.add( + String.format("Namespace not owner of this topic %s.", topic.getMetadata().getName())); } // Topic names with a period ('.') or underscore ('_') could collide List collidingTopics = topicService.findCollidingTopics(ns, topic); if (!collidingTopics.isEmpty()) { validationErrors.addAll(collidingTopics.stream() - .map(collidingTopic -> String.format("Topic %s collides with existing topics: %s.", topic.getMetadata().getName(), collidingTopic)) - .toList()); + .map(collidingTopic -> String.format("Topic %s collides with existing topics: %s.", + topic.getMetadata().getName(), collidingTopic)) + .toList()); } } else { validationErrors.addAll(topicService.validateTopicUpdate(ns, existingTopic.get(), topic)); @@ -113,27 +133,31 @@ public HttpResponse apply(String namespace, @Valid @Body Topic topic, @Qu } sendEventLog(topic.getKind(), - topic.getMetadata(), - status, - existingTopic.map(Topic::getSpec).orElse(null), - topic.getSpec()); + topic.getMetadata(), + status, + existingTopic.map(Topic::getSpec).orElse(null), + topic.getSpec()); return formatHttpResponse(topicService.create(topic), status); } /** - * Delete a topic + * Delete a topic. + * * @param namespace The namespace - * @param topic The topic - * @param dryrun Is dry run mode or not ? + * @param topic The topic + * @param dryrun Whether in dry run mode or not
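TopicController.apply() above accumulates every failed check into validationErrors and throws once at the end, so a caller sees all problems in a single response rather than one per round trip. A standalone sketch of that accumulate-then-throw pattern, with hypothetical checks:

```java
import java.util.ArrayList;
import java.util.List;

public class ValidationAccumulationSketch {
    static void validateTopicName(String name, boolean ownedByNamespace) {
        List<String> validationErrors = new ArrayList<>();
        if (!ownedByNamespace) {
            validationErrors.add(String.format("Namespace not owner of this topic %s.", name));
        }
        if (name.contains("..")) {
            validationErrors.add(String.format("Topic %s has an invalid name.", name));
        }
        // One exception carrying every failure, instead of failing fast on the first.
        if (!validationErrors.isEmpty()) {
            throw new IllegalArgumentException(String.join(" ", validationErrors));
        }
    }

    public static void main(String[] args) {
        try {
            validateTopicName("bad..topic", false);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // both errors reported together
        }
    }
}
```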
* @return An HTTP response */ @Status(HttpStatus.NO_CONTENT) @Delete("/{topic}{?dryrun}") - public HttpResponse deleteTopic(String namespace, String topic, @QueryValue(defaultValue = "false") boolean dryrun) throws InterruptedException, ExecutionException, TimeoutException { + public HttpResponse deleteTopic(String namespace, String topic, + @QueryValue(defaultValue = "false") boolean dryrun) + throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = getNamespace(namespace); if (!topicService.isNamespaceOwnerOfTopic(namespace, topic)) { - throw new ResourceValidationException(List.of("Namespace not owner of this topic \"" + topic + "\"."), "Topic", topic); + throw new ResourceValidationException(List.of("Namespace not owner of this topic \"" + topic + "\"."), + "Topic", topic); } Optional optionalTopic = topicService.findByName(ns, topic); @@ -148,27 +172,28 @@ public HttpResponse deleteTopic(String namespace, String topic, @QueryValu Topic topicToDelete = optionalTopic.get(); sendEventLog(topicToDelete.getKind(), - topicToDelete.getMetadata(), - ApplyStatus.deleted, - topicToDelete.getSpec(), - null); + topicToDelete.getMetadata(), + ApplyStatus.deleted, + topicToDelete.getSpec(), + null); topicService.delete(optionalTopic.get()); return HttpResponse.noContent(); } /** - * Import unsynchronized topics + * Import unsynchronized topics. + * * @param namespace The namespace - * @param dryrun Is dry run mode or not ? + * @param dryrun Whether in dry run mode or not * @return The list of imported topics - * @throws ExecutionException Any execution exception + * @throws ExecutionException Any execution exception * @throws InterruptedException Any interrupted exception - * @throws TimeoutException Any timeout exception + * @throws TimeoutException Any timeout exception */ @Post("/_/import{?dryrun}") public List importResources(String namespace, @QueryValue(defaultValue = "false") boolean dryrun) - throws ExecutionException, InterruptedException, TimeoutException { + throws ExecutionException, InterruptedException, TimeoutException { Namespace ns = getNamespace(namespace); List unsynchronizedTopics = topicService.listUnsynchronizedTopics(ns); @@ -184,28 +209,32 @@ public List importResources(String namespace, @QueryValue(defaultValue = } return unsynchronizedTopics - .stream() - .map(topic -> { - sendEventLog("Topic", topic.getMetadata(), ApplyStatus.created, null, topic.getSpec()); - return topicService.create(topic); - }) - .toList(); + .stream() + .map(topic -> { + sendEventLog("Topic", topic.getMetadata(), ApplyStatus.created, null, topic.getSpec()); + return topicService.create(topic); + }) + .toList(); } /** - * Delete records from topic + * Delete records from topic. + * * @param namespace The namespace - * @param topic The topic - * @param dryrun Is dry run mode or not ? + * @param topic The topic + * @param dryrun Whether in dry run mode or not
* @return The list of topic-partitions where records have been deleted - * @throws ExecutionException Any execution exception + * @throws ExecutionException Any execution exception * @throws InterruptedException Any interrupted exception */ @Post("{topic}/delete-records{?dryrun}") - public List deleteRecords(String namespace, String topic, @QueryValue(defaultValue = "false") boolean dryrun) throws InterruptedException, ExecutionException { + public List deleteRecords(String namespace, String topic, + @QueryValue(defaultValue = "false") boolean dryrun) + throws InterruptedException, ExecutionException { Namespace ns = getNamespace(namespace); if (!topicService.isNamespaceOwnerOfTopic(namespace, topic)) { - throw new ResourceValidationException(List.of("Namespace not owner of this topic \"" + topic + "\"."), "Topic", topic); + throw new ResourceValidationException(List.of("Namespace not owner of this topic \"" + topic + "\"."), + "Topic", topic); } Optional optionalTopic = topicService.findByName(ns, topic); @@ -216,7 +245,8 @@ public List deleteRecords(String namespace, String topic, Topic deleteRecordsTopic = optionalTopic.get(); List validationErrors = topicService.validateDeleteRecordsTopic(deleteRecordsTopic); if (!validationErrors.isEmpty()) { - throw new ResourceValidationException(validationErrors, deleteRecordsTopic.getKind(), deleteRecordsTopic.getMetadata().getName()); + throw new ResourceValidationException(validationErrors, deleteRecordsTopic.getKind(), + deleteRecordsTopic.getMetadata().getName()); } Map recordsToDelete = topicService.prepareRecordsToDelete(optionalTopic.get()); @@ -230,14 +260,14 @@ public List deleteRecords(String namespace, String topic, } return deletedRecords.entrySet() - .stream() - .map(entry -> DeleteRecordsResponse.builder() - .spec(DeleteRecordsResponse.DeleteRecordsResponseSpec.builder() - .topic(entry.getKey().topic()) - .partition(entry.getKey().partition()) - .offset(entry.getValue()) - .build()) - .build()) - .collect(Collectors.toList()); + .stream() + .map(entry -> DeleteRecordsResponse.builder() + .spec(DeleteRecordsResponse.DeleteRecordsResponseSpec.builder() + .topic(entry.getKey().topic()) + .partition(entry.getKey().partition()) + .offset(entry.getValue()) + .build()) + .build()) + .collect(Collectors.toList()); } } diff --git a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicNonNamespacedController.java b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicNonNamespacedController.java index 5c634ad9..1c545c9f 100644 --- a/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicNonNamespacedController.java +++ b/src/main/java/com/michelin/ns4kafka/controllers/topic/TopicNonNamespacedController.java @@ -7,11 +7,13 @@ import io.micronaut.http.annotation.Controller; import io.micronaut.http.annotation.Get; import io.swagger.v3.oas.annotations.tags.Tag; -import jakarta.inject.Inject; - import jakarta.annotation.security.RolesAllowed; +import jakarta.inject.Inject; import java.util.List; +/** + * Non-namespaced controller for topics. + */ @Tag(name = "Topics", description = "Manage the topics.") @Controller(value = "/api/topics") @RolesAllowed(ResourceBasedSecurityRule.IS_ADMIN) @@ -20,7 +22,8 @@ public class TopicNonNamespacedController extends NonNamespacedResourceControlle TopicService topicService; /** - * List topics + * List topics.
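deleteRecords() above maps each Map.Entry of TopicPartition to deleted offset into a response DTO through nested Lombok builders. A dependency-free sketch of the same entry-to-DTO mapping, with plain records standing in for the builder-generated classes:

```java
import java.util.List;
import java.util.Map;

public class DeleteRecordsMappingSketch {
    // Hypothetical stand-ins for TopicPartition and DeleteRecordsResponse.
    record Partition(String topic, int partition) {}
    record DeletedOffsets(String topic, int partition, long offset) {}

    static List<DeletedOffsets> toResponses(Map<Partition, Long> deletedRecords) {
        return deletedRecords.entrySet()
            .stream()
            .map(entry -> new DeletedOffsets(
                entry.getKey().topic(),
                entry.getKey().partition(),
                entry.getValue()))
            .toList();
    }

    public static void main(String[] args) {
        System.out.println(toResponses(Map.of(new Partition("orders", 0), 42L)));
    }
}
```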
+ * * @return A list of topics */ @Get diff --git a/src/main/java/com/michelin/ns4kafka/logs/ConsoleLogListener.java b/src/main/java/com/michelin/ns4kafka/logs/ConsoleLogListener.java index f3654627..20a8c6c2 100644 --- a/src/main/java/com/michelin/ns4kafka/logs/ConsoleLogListener.java +++ b/src/main/java/com/michelin/ns4kafka/logs/ConsoleLogListener.java @@ -7,6 +7,9 @@ import jakarta.inject.Singleton; import lombok.extern.slf4j.Slf4j; +/** + * Console log listener. + */ @Slf4j @Singleton @Requires(property = "ns4kafka.log.console.enabled", notEquals = StringUtils.FALSE) @@ -15,13 +18,13 @@ public class ConsoleLogListener implements ApplicationEventListener { @Override public void onApplicationEvent(AuditLog event) { log.info("{} {} {} {} {} in namespace {} on cluster {}", - event.isAdmin() ? "Admin" : "User", - event.getUser(), - event.getOperation(), - event.getKind(), - event.getMetadata().getName(), - event.getMetadata().getNamespace(), - event.getMetadata().getCluster() + event.isAdmin() ? "Admin" : "User", + event.getUser(), + event.getOperation(), + event.getKind(), + event.getMetadata().getName(), + event.getMetadata().getNamespace(), + event.getMetadata().getCluster() ); } } diff --git a/src/main/java/com/michelin/ns4kafka/logs/KafkaLogListener.java b/src/main/java/com/michelin/ns4kafka/logs/KafkaLogListener.java index cf21aad9..35bd17a2 100644 --- a/src/main/java/com/michelin/ns4kafka/logs/KafkaLogListener.java +++ b/src/main/java/com/michelin/ns4kafka/logs/KafkaLogListener.java @@ -1,9 +1,6 @@ package com.michelin.ns4kafka.logs; import com.michelin.ns4kafka.models.AuditLog; -import io.micronaut.configuration.kafka.annotation.KafkaClient; -import io.micronaut.configuration.kafka.annotation.KafkaKey; -import io.micronaut.configuration.kafka.annotation.Topic; import io.micronaut.context.annotation.Requires; import io.micronaut.context.event.ApplicationEventListener; import io.micronaut.core.util.StringUtils; @@ -11,6 +8,9 @@ import jakarta.inject.Inject; import jakarta.inject.Singleton; +/** + * Kafka log listener.
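Both listeners above are guarded by @Requires on a configuration property, so a deployment opts into console and Kafka audit logging independently; when the property is unset, the bean simply never exists in the context. A minimal sketch of that toggle (micronaut-inject and micronaut-context are assumed; the property name and bean are hypothetical):

```java
import io.micronaut.context.ApplicationContext;
import io.micronaut.context.annotation.Requires;
import io.micronaut.core.util.StringUtils;
import jakarta.inject.Singleton;
import java.util.Map;

// Only instantiated when demo.feature.enabled=true is present in configuration.
@Singleton
@Requires(property = "demo.feature.enabled", value = StringUtils.TRUE)
public class FeatureToggledBean {
    public String greet() {
        return "feature is on";
    }

    public static void main(String[] args) {
        try (ApplicationContext context = ApplicationContext.run(
                Map.<String, Object>of("demo.feature.enabled", "true"))) {
            System.out.println(context.getBean(FeatureToggledBean.class).greet());
        }
    }
}
```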
+ */ @Singleton @Requires(property = "ns4kafka.log.kafka.enabled", value = StringUtils.TRUE) public class KafkaLogListener implements ApplicationEventListener { @@ -24,10 +24,4 @@ public void onApplicationEvent(AuditLog event) { } } -@KafkaClient -@Requires(property = "ns4kafka.log.kafka.enabled", value = StringUtils.TRUE) -interface KafkaLogProducer { - @Topic(value = "${ns4kafka.log.kafka.topic}") - void sendAuditLog(@KafkaKey String namespace, AuditLog log); -} diff --git a/src/main/java/com/michelin/ns4kafka/logs/KafkaLogProducer.java b/src/main/java/com/michelin/ns4kafka/logs/KafkaLogProducer.java new file mode 100644 index 00000000..92b3cbae --- /dev/null +++ b/src/main/java/com/michelin/ns4kafka/logs/KafkaLogProducer.java @@ -0,0 +1,16 @@ +package com.michelin.ns4kafka.logs; + +import com.michelin.ns4kafka.models.AuditLog; +import io.micronaut.configuration.kafka.annotation.KafkaClient; +import io.micronaut.configuration.kafka.annotation.KafkaKey; +import io.micronaut.configuration.kafka.annotation.Topic; +import io.micronaut.context.annotation.Requires; +import io.micronaut.core.util.StringUtils; + +@KafkaClient +@Requires(property = "ns4kafka.log.kafka.enabled", value = StringUtils.TRUE) +interface KafkaLogProducer { + + @Topic(value = "${ns4kafka.log.kafka.topic}") + void sendAuditLog(@KafkaKey String namespace, AuditLog log); +} \ No newline at end of file diff --git a/src/main/java/com/michelin/ns4kafka/models/AccessControlEntry.java b/src/main/java/com/michelin/ns4kafka/models/AccessControlEntry.java index 38bb01d3..8c11fc94 100644 --- a/src/main/java/com/michelin/ns4kafka/models/AccessControlEntry.java +++ b/src/main/java/com/michelin/ns4kafka/models/AccessControlEntry.java @@ -1,17 +1,22 @@ package com.michelin.ns4kafka.models; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotBlank; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; -@Introspected +/** + * Access control entry. + */ +@Data @Builder +@Introspected @NoArgsConstructor @AllArgsConstructor -@Data public class AccessControlEntry { private final String apiVersion = "v1"; private final String kind = "AccessControlEntry"; @@ -24,6 +29,38 @@ public class AccessControlEntry { @NotNull private AccessControlEntrySpec spec; + /** + * Resource type managed by Ns4kafka. + */ + public enum ResourceType { + TOPIC, + GROUP, + CONNECT, + CONNECT_CLUSTER, + SCHEMA, + TRANSACTIONAL_ID + } + + /** + * Resource pattern type. + */ + public enum ResourcePatternType { + LITERAL, + PREFIXED + } + + /** + * Permission. + */ + public enum Permission { + OWNER, + READ, + WRITE + } + + /** + * Access control entry specification. 
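The model classes throughout this diff pair @Data with @Builder, plus @NoArgsConstructor/@AllArgsConstructor (the all-args constructor backs the builder, the no-args one serves serializers). A small sketch of what that combination gives callers, with a hypothetical model (Lombok assumed on the annotation-processor path):

```java
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;

public class LombokModelSketch {
    @Data               // getters, setters, equals/hashCode, toString
    @Builder            // fluent static builder
    @NoArgsConstructor  // needed by many serializers
    @AllArgsConstructor // backs the generated builder
    public static class DemoEntry {
        private String name;
        private String grantedTo;
    }

    public static void main(String[] args) {
        DemoEntry entry = DemoEntry.builder()
            .name("demo-acl")
            .grantedTo("my-namespace")
            .build();
        System.out.println(entry); // toString generated by @Data
    }
}
```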
+ */ @Data @Builder @Introspected @@ -47,24 +84,4 @@ public static class AccessControlEntrySpec { @NotNull protected String grantedTo; } - - public enum ResourceType { - TOPIC, - GROUP, - CONNECT, - CONNECT_CLUSTER, - SCHEMA, - TRANSACTIONAL_ID - } - - public enum ResourcePatternType { - LITERAL, - PREFIXED - } - - public enum Permission { - OWNER, - READ, - WRITE - } } diff --git a/src/main/java/com/michelin/ns4kafka/models/AuditLog.java b/src/main/java/com/michelin/ns4kafka/models/AuditLog.java index 85b222c4..0c4a197f 100644 --- a/src/main/java/com/michelin/ns4kafka/models/AuditLog.java +++ b/src/main/java/com/michelin/ns4kafka/models/AuditLog.java @@ -2,11 +2,13 @@ import com.fasterxml.jackson.annotation.JsonFormat; import com.michelin.ns4kafka.utils.enums.ApplyStatus; +import java.util.Date; import lombok.AllArgsConstructor; import lombok.Data; -import java.util.Date; - +/** + * Audit log. + */ @Data @AllArgsConstructor public class AuditLog { diff --git a/src/main/java/com/michelin/ns4kafka/models/DeleteRecordsResponse.java b/src/main/java/com/michelin/ns4kafka/models/DeleteRecordsResponse.java index 89d504fc..e738efd0 100644 --- a/src/main/java/com/michelin/ns4kafka/models/DeleteRecordsResponse.java +++ b/src/main/java/com/michelin/ns4kafka/models/DeleteRecordsResponse.java @@ -1,11 +1,17 @@ package com.michelin.ns4kafka.models; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.ToString; +/** + * Delete records response. + */ @Getter @Builder @Introspected @@ -23,6 +29,9 @@ public class DeleteRecordsResponse { @NotNull private DeleteRecordsResponseSpec spec; + /** + * Delete records response specification. + */ @Getter @Builder @ToString diff --git a/src/main/java/com/michelin/ns4kafka/models/KafkaStream.java b/src/main/java/com/michelin/ns4kafka/models/KafkaStream.java index 913d8427..b19d3ee7 100644 --- a/src/main/java/com/michelin/ns4kafka/models/KafkaStream.java +++ b/src/main/java/com/michelin/ns4kafka/models/KafkaStream.java @@ -1,11 +1,16 @@ package com.michelin.ns4kafka.models; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; +/** + * Kafka Stream. + */ @Data @Builder @Introspected diff --git a/src/main/java/com/michelin/ns4kafka/models/KafkaUserResetPassword.java b/src/main/java/com/michelin/ns4kafka/models/KafkaUserResetPassword.java index 8e80256b..431b0f73 100644 --- a/src/main/java/com/michelin/ns4kafka/models/KafkaUserResetPassword.java +++ b/src/main/java/com/michelin/ns4kafka/models/KafkaUserResetPassword.java @@ -1,8 +1,15 @@ package com.michelin.ns4kafka.models; import io.micronaut.core.annotation.Introspected; -import lombok.*; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.Setter; +/** + * Kafka user reset password. + */ @Getter @Setter @Builder @@ -15,6 +22,9 @@ public class KafkaUserResetPassword { private ObjectMeta metadata; private KafkaUserResetPasswordSpec spec; + /** + * Kafka user reset password spec. 
+ */ @Getter @Builder @Introspected diff --git a/src/main/java/com/michelin/ns4kafka/models/Namespace.java b/src/main/java/com/michelin/ns4kafka/models/Namespace.java index 05652cc2..d4449bda 100644 --- a/src/main/java/com/michelin/ns4kafka/models/Namespace.java +++ b/src/main/java/com/michelin/ns4kafka/models/Namespace.java @@ -3,16 +3,18 @@ import com.michelin.ns4kafka.validation.ConnectValidator; import com.michelin.ns4kafka.validation.TopicValidator; import io.micronaut.core.annotation.Introspected; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotBlank; import jakarta.validation.constraints.NotNull; import java.util.List; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; +/** + * Namespace. + */ @Data @Builder @Introspected @@ -30,6 +32,9 @@ public class Namespace { @NotNull private NamespaceSpec spec; + /** + * Namespace spec. + */ @Data @Builder @Introspected diff --git a/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java b/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java index eefd14fc..bfd73315 100644 --- a/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java +++ b/src/main/java/com/michelin/ns4kafka/models/ObjectMeta.java @@ -2,13 +2,19 @@ import com.fasterxml.jackson.annotation.JsonFormat; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.constraints.NotBlank; import jakarta.validation.constraints.Pattern; import java.util.Date; import java.util.Map; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.NoArgsConstructor; +/** + * Object metadata. + */ @Data @Builder @Introspected @@ -20,7 +26,7 @@ public class ObjectMeta { private String name; private String namespace; private String cluster; - private Map labels; + private Map labels; @EqualsAndHashCode.Exclude private int generation; @EqualsAndHashCode.Exclude diff --git a/src/main/java/com/michelin/ns4kafka/models/RoleBinding.java b/src/main/java/com/michelin/ns4kafka/models/RoleBinding.java index 7dcf65f7..92f902db 100644 --- a/src/main/java/com/michelin/ns4kafka/models/RoleBinding.java +++ b/src/main/java/com/michelin/ns4kafka/models/RoleBinding.java @@ -1,23 +1,24 @@ package com.michelin.ns4kafka.models; import io.micronaut.core.annotation.Introspected; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotBlank; import jakarta.validation.constraints.NotEmpty; import jakarta.validation.constraints.NotNull; import java.util.Collection; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; - -@Introspected(classes = {RoleBinding.class, RoleBinding.RoleBindingSpec.class, RoleBinding.Role.class, RoleBinding.Subject.class}) +/** + * Role binding. + */ +@Data @Builder -@AllArgsConstructor +@Introspected @NoArgsConstructor -@Data +@AllArgsConstructor public class RoleBinding { private final String apiVersion = "v1"; private final String kind = "RoleBinding"; @@ -30,6 +31,27 @@ public class RoleBinding { @NotNull private RoleBindingSpec spec; + /** + * HTTP verbs. + */ + public enum Verb { + GET, + POST, + PUT, + DELETE + } + + /** + * Subject type. 
+ */ + public enum SubjectType { + GROUP, + USER + } + + /** + * Role binding spec. + */ @Data @Builder @Introspected @@ -45,10 +67,14 @@ public static class RoleBindingSpec { private Subject subject; } + /** + * Role. + */ + @Data @Builder - @AllArgsConstructor + @Introspected @NoArgsConstructor - @Data + @AllArgsConstructor public static class Role { @NotNull @NotEmpty @@ -59,10 +85,14 @@ public static class Role { private Collection verbs; } + /** + * Subject. + */ + @Data @Builder - @AllArgsConstructor + @Introspected @NoArgsConstructor - @Data + @AllArgsConstructor public static class Subject { @NotNull private SubjectType subjectType; @@ -72,16 +102,4 @@ public static class Subject { private String subjectName; } - - public enum Verb { - GET, - POST, - PUT, - DELETE - } - - public enum SubjectType { - GROUP, - USER - } } diff --git a/src/main/java/com/michelin/ns4kafka/models/Status.java b/src/main/java/com/michelin/ns4kafka/models/Status.java index 6c183bdd..82378a90 100644 --- a/src/main/java/com/michelin/ns4kafka/models/Status.java +++ b/src/main/java/com/michelin/ns4kafka/models/Status.java @@ -1,13 +1,15 @@ package com.michelin.ns4kafka.models; import io.micronaut.core.annotation.Introspected; +import java.util.List; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; -import java.util.List; - +/** + * Status. + */ @Data @Builder @Introspected @@ -22,21 +24,17 @@ public class Status { private StatusDetails details; private int code; - @Data - @Builder - @AllArgsConstructor - @NoArgsConstructor - public static class StatusDetails { - private String name; - private String kind; - private List causes; - } - + /** + * Status phase. + */ public enum StatusPhase { Success, Failed } + /** + * Status reason. + */ public enum StatusReason { BadRequest, Unauthorized, @@ -50,4 +48,17 @@ public enum StatusReason { MethodNotAllowed, InternalError } + + /** + * Status details. + */ + @Data + @Builder + @AllArgsConstructor + @NoArgsConstructor + public static class StatusDetails { + private String name; + private String kind; + private List causes; + } } diff --git a/src/main/java/com/michelin/ns4kafka/models/Topic.java b/src/main/java/com/michelin/ns4kafka/models/Topic.java index a569a0da..eda33ca1 100644 --- a/src/main/java/com/michelin/ns4kafka/models/Topic.java +++ b/src/main/java/com/michelin/ns4kafka/models/Topic.java @@ -3,14 +3,22 @@ import com.fasterxml.jackson.annotation.JsonFormat; import io.micronaut.core.annotation.Introspected; import io.swagger.v3.oas.annotations.media.Schema; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; import java.time.Instant; import java.util.Date; import java.util.Map; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.Setter; +/** + * Topic. + */ @Data @Builder @Introspected @@ -30,6 +38,18 @@ public class Topic { @EqualsAndHashCode.Exclude private TopicStatus status; + /** + * Topic phase. + */ + public enum TopicPhase { + Pending, + Success, + Failed + } + + /** + * Topic spec. + */ @Data @Builder @NoArgsConstructor @@ -40,11 +60,14 @@ public static class TopicSpec { private Map configs; } + /** + * Topic status. 
+ */ + @Getter + @Setter @Builder @AllArgsConstructor @NoArgsConstructor - @Getter - @Setter @Schema(description = "Server-side", accessMode = Schema.AccessMode.READ_ONLY) public static class TopicStatus { private TopicPhase phase; @@ -54,46 +77,43 @@ public static class TopicStatus { private Date lastUpdateTime; /** - * Success status + * Success status. + * * @param message A success message * @return A success topic status */ public static TopicStatus ofSuccess(String message) { return TopicStatus.builder() - .phase(TopicPhase.Success) - .message(message) - .lastUpdateTime(Date.from(Instant.now())) - .build(); + .phase(TopicPhase.Success) + .message(message) + .lastUpdateTime(Date.from(Instant.now())) + .build(); } /** - * Failed status + * Failed status. + * * @param message A failure message * @return A failure topic status */ public static TopicStatus ofFailed(String message) { return TopicStatus.builder() - .phase(TopicPhase.Failed) - .message(message) - .lastUpdateTime(Date.from(Instant.now())) - .build(); + .phase(TopicPhase.Failed) + .message(message) + .lastUpdateTime(Date.from(Instant.now())) + .build(); } /** - * Pending status + * Pending status. + * * @return A pending topic status */ public static TopicStatus ofPending() { return Topic.TopicStatus.builder() - .phase(Topic.TopicPhase.Pending) - .message("Awaiting processing by executor") - .build(); + .phase(Topic.TopicPhase.Pending) + .message("Awaiting processing by executor") + .build(); } } - - public enum TopicPhase { - Pending, - Success, - Failed - } } diff --git a/src/main/java/com/michelin/ns4kafka/models/connect/cluster/ConnectCluster.java b/src/main/java/com/michelin/ns4kafka/models/connect/cluster/ConnectCluster.java index 05c32449..7653ef33 100644 --- a/src/main/java/com/michelin/ns4kafka/models/connect/cluster/ConnectCluster.java +++ b/src/main/java/com/michelin/ns4kafka/models/connect/cluster/ConnectCluster.java @@ -2,11 +2,17 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.NoArgsConstructor; +/** + * Kafka Connect Cluster. + */ @Data @Builder @Introspected @@ -24,6 +30,17 @@ public class ConnectCluster { @NotNull private ConnectClusterSpec spec; + /** + * Kafka Connect status. + */ + public enum Status { + HEALTHY, + IDLE + } + + /** + * Kafka Connect Cluster specification. + */ @Data @Builder @Introspected @@ -47,13 +64,13 @@ public static class ConnectClusterSpec { String password; /** - * Gets the Kafka Connect status + * Gets the Kafka Connect status. */ @EqualsAndHashCode.Exclude Status status; /** - * Gets the Kafka Connect status context message + * Gets the Kafka Connect status context message. 
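Topic.TopicStatus in the hunks above exposes ofSuccess/ofFailed/ofPending factories so call sites never assemble a status by hand and the invariants (phase, default message, timestamp) live in one place. A dependency-free sketch of that static-factory idea, with a hypothetical status type:

```java
import java.time.Instant;

public class StatusFactorySketch {
    enum Phase { Pending, Success, Failed }

    record Status(Phase phase, String message, Instant lastUpdateTime) {
        // Factories encode the invariants once; call sites stay one-liners.
        static Status ofSuccess(String message) {
            return new Status(Phase.Success, message, Instant.now());
        }

        static Status ofFailed(String message) {
            return new Status(Phase.Failed, message, Instant.now());
        }

        static Status ofPending() {
            return new Status(Phase.Pending, "Awaiting processing by executor", null);
        }
    }

    public static void main(String[] args) {
        System.out.println(Status.ofSuccess("Topic created"));
        System.out.println(Status.ofPending());
    }
}
```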
*/ @EqualsAndHashCode.Exclude String statusMessage; @@ -73,9 +90,4 @@ public static class ConnectClusterSpec { */ String aes256Format; } - - public enum Status { - HEALTHY, - IDLE - } } diff --git a/src/main/java/com/michelin/ns4kafka/models/connect/cluster/VaultResponse.java b/src/main/java/com/michelin/ns4kafka/models/connect/cluster/VaultResponse.java index 3afbcd1f..c80f266e 100644 --- a/src/main/java/com/michelin/ns4kafka/models/connect/cluster/VaultResponse.java +++ b/src/main/java/com/michelin/ns4kafka/models/connect/cluster/VaultResponse.java @@ -2,10 +2,13 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.ToString; /** * Represents the Kafka Connect Cluster Vault Response. @@ -16,18 +19,11 @@ @NoArgsConstructor @AllArgsConstructor public class VaultResponse { - /** - * The API version. - */ private final String apiVersion = "v1"; - - /** - * The Vault Response ns4kafka kind. - */ private final String kind = "VaultResponse"; /** - * The object metadata + * The object metadata. */ @Valid @NotNull diff --git a/src/main/java/com/michelin/ns4kafka/models/connector/ChangeConnectorState.java b/src/main/java/com/michelin/ns4kafka/models/connector/ChangeConnectorState.java index dc087380..dedd50db 100644 --- a/src/main/java/com/michelin/ns4kafka/models/connector/ChangeConnectorState.java +++ b/src/main/java/com/michelin/ns4kafka/models/connector/ChangeConnectorState.java @@ -3,14 +3,16 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; import io.micronaut.http.HttpStatus; +import jakarta.validation.Valid; +import jakarta.validation.constraints.NotNull; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; -import jakarta.validation.Valid; -import jakarta.validation.constraints.NotNull; - +/** + * Change connector state. + */ @Data @Builder @Introspected @@ -29,6 +31,18 @@ public class ChangeConnectorState { private ChangeConnectorStateSpec spec; private ChangeConnectorStateStatus status; + /** + * Connector action. + */ + public enum ConnectorAction { + pause, + resume, + restart + } + + /** + * Change connector state specification. + */ @Data @Builder @Introspected @@ -39,6 +53,9 @@ public static class ChangeConnectorStateSpec { private ConnectorAction action; } + /** + * Change connector state status.
+ */ @Data @Builder @Introspected @@ -49,10 +66,4 @@ public static class ChangeConnectorStateStatus { private HttpStatus code; private String errorMessage; } - - public enum ConnectorAction { - pause, - resume, - restart - } } diff --git a/src/main/java/com/michelin/ns4kafka/models/connector/Connector.java b/src/main/java/com/michelin/ns4kafka/models/connector/Connector.java index 8157cb36..91a5ecd3 100644 --- a/src/main/java/com/michelin/ns4kafka/models/connector/Connector.java +++ b/src/main/java/com/michelin/ns4kafka/models/connector/Connector.java @@ -4,15 +4,23 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotBlank; import jakarta.validation.constraints.NotNull; import java.util.Date; import java.util.List; import java.util.Map; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.Setter; +/** + * Connector. + */ @Data @Builder @Introspected @@ -33,6 +41,21 @@ public class Connector { @EqualsAndHashCode.Exclude private ConnectorStatus status; + /** + * Connector task state. + */ + public enum TaskState { + // From https://github.com/apache/kafka/blob/trunk/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java + UNASSIGNED, + RUNNING, + PAUSED, + FAILED, + DESTROYED, + } + + /** + * Connector specification. + */ @Data @Builder @Introspected @@ -47,6 +70,9 @@ public static class ConnectorSpec { private Map<String, String> config; } + /** + * Connector status. + */ @Getter @Setter @Builder @@ -55,7 +81,7 @@ public static class ConnectorSpec { @AllArgsConstructor public static class ConnectorStatus { private TaskState state; - private String worker_id; + private String workerId; private List<TaskStatus> tasks; @JsonFormat(shape = JsonFormat.Shape.STRING) @@ -63,6 +89,9 @@ } + /** + * Connector task status. + */ @Getter @Setter @Builder @@ -73,15 +102,6 @@ public static class TaskStatus { String id; TaskState state; String trace; - String worker_id; - } - - public enum TaskState { - // From https://github.com/apache/kafka/blob/trunk/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java - UNASSIGNED, - RUNNING, - PAUSED, - FAILED, - DESTROYED, + String workerId; } } diff --git a/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsets.java b/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsets.java index 14b0f605..b6d34e7a 100644 --- a/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsets.java +++ b/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsets.java @@ -2,12 +2,20 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotBlank; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.Setter; +import lombok.ToString; +/** + * Consumer group reset offsets.
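The ConnectorAction enum moves to the top of ChangeConnectorState without renaming its lowercase constants, which mirror the Kafka Connect REST verbs. A rough usage sketch, assuming the model classes exactly as in this diff (the connector name is hypothetical):

import com.michelin.ns4kafka.models.ObjectMeta;
import com.michelin.ns4kafka.models.connector.ChangeConnectorState;

class RestartSketch {
    public static void main(String[] args) {
        // Build a state-change request asking the executor to restart one connector.
        ChangeConnectorState restart = ChangeConnectorState.builder()
            .metadata(ObjectMeta.builder().name("my-connector").build())
            .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder()
                .action(ChangeConnectorState.ConnectorAction.restart)
                .build())
            .build();
        System.out.println(restart.getSpec().getAction());
    }
}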
+ */ @Data @Builder @Introspected @@ -25,6 +33,21 @@ public class ConsumerGroupResetOffsets { @NotNull private ConsumerGroupResetOffsetsSpec spec; + /** + * Represents the reset offsets method. + */ + public enum ResetOffsetsMethod { + TO_EARLIEST, + TO_LATEST, + TO_DATETIME, // string:yyyy-MM-ddTHH:mm:SS.sss + BY_DURATION, + SHIFT_BY, + TO_OFFSET + } + + /** + * Consumer group reset offsets specification. + */ @Getter @Setter @Builder @@ -41,13 +64,4 @@ public static class ConsumerGroupResetOffsetsSpec { private ResetOffsetsMethod method; private String options; } - - public enum ResetOffsetsMethod { - TO_EARLIEST, - TO_LATEST, - TO_DATETIME, // string:yyyy-MM-ddTHH:mm:SS.sss - BY_DURATION, - SHIFT_BY, - TO_OFFSET - } } diff --git a/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsetsResponse.java b/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsetsResponse.java index eb4e8065..690b41ce 100644 --- a/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsetsResponse.java +++ b/src/main/java/com/michelin/ns4kafka/models/consumer/group/ConsumerGroupResetOffsetsResponse.java @@ -2,41 +2,43 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.ToString; -@Introspected -@Builder +/** + * Consumer group reset offsets response. + */ @Getter +@Builder +@Introspected @NoArgsConstructor @AllArgsConstructor public class ConsumerGroupResetOffsetsResponse { - /** - * API version - */ private final String apiVersion = "v1"; - - /** - * Resource kind - */ private final String kind = "ConsumerGroupResetOffsetsResponse"; /** - * Resource metadata + * Resource metadata. */ @Valid @NotNull private ObjectMeta metadata; /** - * Resource specifications + * Resource specifications. */ @Valid @NotNull private ConsumerGroupResetOffsetsResponseSpec spec; + /** + * Consumer group reset offsets response specification. + */ @Getter @Builder @ToString @@ -45,22 +47,22 @@ public class ConsumerGroupResetOffsetsResponse { @NoArgsConstructor public static class ConsumerGroupResetOffsetsResponseSpec { /** - * The topic that was reset + * The topic that was reset. */ private String topic; /** - * The partition that was reset + * The partition that was reset. */ private int partition; /** - * The new offset + * The new offset. */ private Long offset; /** - * The consumer group + * The consumer group. */ private String consumerGroup; } diff --git a/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuota.java b/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuota.java index 58847d12..c8268355 100644 --- a/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuota.java +++ b/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuota.java @@ -2,17 +2,23 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; import java.util.Map; - +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.Getter; +import lombok.NoArgsConstructor; + +/** + * Resource quota.
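The ResetOffsetsMethod enum likewise moves above the spec it parameterizes. A sketch of how the TO_DATETIME variant and its options string fit together, assuming only the fields visible in this diff (group name and timestamp are hypothetical):

import com.michelin.ns4kafka.models.ObjectMeta;
import com.michelin.ns4kafka.models.consumer.group.ConsumerGroupResetOffsets;

class ResetSketch {
    public static void main(String[] args) {
        // TO_DATETIME carries its target in "options", in the yyyy-MM-ddTHH:mm:SS.sss
        // shape noted on the enum constant.
        ConsumerGroupResetOffsets reset = ConsumerGroupResetOffsets.builder()
            .metadata(ObjectMeta.builder().name("my-group").build())
            .spec(ConsumerGroupResetOffsets.ConsumerGroupResetOffsetsSpec.builder()
                .method(ConsumerGroupResetOffsets.ResetOffsetsMethod.TO_DATETIME)
                .options("2023-01-01T00:00:00.000")
                .build())
            .build();
        System.out.println(reset.getSpec().getOptions());
    }
}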
+ */ @Data @Builder @Introspected -@AllArgsConstructor @NoArgsConstructor +@AllArgsConstructor public class ResourceQuota { private final String apiVersion = "v1"; private final String kind = "ResourceQuota"; @@ -24,6 +30,9 @@ public class ResourceQuota { @NotNull private Map<String, String> spec; + /** + * Resource quota spec keys. + */ @Getter @AllArgsConstructor public enum ResourceQuotaSpecKey { diff --git a/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuotaResponse.java b/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuotaResponse.java index c2a43448..2d0150e2 100644 --- a/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuotaResponse.java +++ b/src/main/java/com/michelin/ns4kafka/models/quota/ResourceQuotaResponse.java @@ -2,11 +2,17 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.ToString; +/** + * Resource quota response. + */ @Getter @Builder @Introspected @@ -24,6 +30,9 @@ public class ResourceQuotaResponse { @NotNull private ResourceQuotaResponseSpec spec; + /** + * Resource quota response spec. + */ @Getter @Builder @ToString diff --git a/src/main/java/com/michelin/ns4kafka/models/schema/Schema.java b/src/main/java/com/michelin/ns4kafka/models/schema/Schema.java index 56da0d60..d671b8db 100644 --- a/src/main/java/com/michelin/ns4kafka/models/schema/Schema.java +++ b/src/main/java/com/michelin/ns4kafka/models/schema/Schema.java @@ -2,12 +2,19 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; import java.util.List; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.Setter; +/** + * Schema. + */ @Data @Builder @Introspected @@ -25,6 +32,32 @@ public class Schema { @NotNull private SchemaSpec spec; + /** + * Schema compatibility. + */ + public enum Compatibility { + GLOBAL, + BACKWARD, + BACKWARD_TRANSITIVE, + FORWARD, + FORWARD_TRANSITIVE, + FULL, + FULL_TRANSITIVE, + NONE + } + + /** + * Schema type. + */ + public enum SchemaType { + AVRO, + JSON, + PROTOBUF + } + + /** + * Schema spec. + */ @Data @Builder @Introspected @@ -42,6 +75,9 @@ public static class SchemaSpec { private Compatibility compatibility = Compatibility.GLOBAL; private List<Reference> references; + /** + * Schema reference.
+ */ @Getter @Setter @Builder @@ -54,22 +90,4 @@ public static class Reference { private Integer version; } } - - public enum Compatibility { - GLOBAL, - BACKWARD, - BACKWARD_TRANSITIVE, - FORWARD, - FORWARD_TRANSITIVE, - FULL, - FULL_TRANSITIVE, - NONE - } - - @Introspected - public enum SchemaType { - AVRO, - JSON, - PROTOBUF - } } diff --git a/src/main/java/com/michelin/ns4kafka/models/schema/SchemaCompatibilityState.java b/src/main/java/com/michelin/ns4kafka/models/schema/SchemaCompatibilityState.java index e96cdad6..e17bee26 100644 --- a/src/main/java/com/michelin/ns4kafka/models/schema/SchemaCompatibilityState.java +++ b/src/main/java/com/michelin/ns4kafka/models/schema/SchemaCompatibilityState.java @@ -2,14 +2,20 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; -import lombok.*; - import jakarta.validation.Valid; import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.ToString; -@Introspected -@Builder +/** + * Schema compatibility state. + */ @Getter +@Builder +@Introspected @NoArgsConstructor @AllArgsConstructor public class SchemaCompatibilityState { @@ -24,6 +30,9 @@ public class SchemaCompatibilityState { @NotNull private SchemaCompatibilityState.SchemaCompatibilityStateSpec spec; + /** + * Schema compatibility state spec. + */ @Getter @Builder @ToString diff --git a/src/main/java/com/michelin/ns4kafka/models/schema/SchemaList.java b/src/main/java/com/michelin/ns4kafka/models/schema/SchemaList.java index 7c3296b5..9cf49edf 100644 --- a/src/main/java/com/michelin/ns4kafka/models/schema/SchemaList.java +++ b/src/main/java/com/michelin/ns4kafka/models/schema/SchemaList.java @@ -2,14 +2,16 @@ import com.michelin.ns4kafka.models.ObjectMeta; import io.micronaut.core.annotation.Introspected; +import jakarta.validation.Valid; +import jakarta.validation.constraints.NotNull; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; -import jakarta.validation.Valid; -import jakarta.validation.constraints.NotNull; - +/** + * Schema list. + */ @Data @Builder @Introspected diff --git a/src/main/java/com/michelin/ns4kafka/config/AkhqClaimProviderControllerConfig.java b/src/main/java/com/michelin/ns4kafka/properties/AkhqProperties.java similarity index 84% rename from src/main/java/com/michelin/ns4kafka/config/AkhqClaimProviderControllerConfig.java rename to src/main/java/com/michelin/ns4kafka/properties/AkhqProperties.java index 30e25294..de4d0ce4 100644 --- a/src/main/java/com/michelin/ns4kafka/config/AkhqClaimProviderControllerConfig.java +++ b/src/main/java/com/michelin/ns4kafka/properties/AkhqProperties.java @@ -1,17 +1,19 @@ -package com.michelin.ns4kafka.config; +package com.michelin.ns4kafka.properties; import com.michelin.ns4kafka.models.AccessControlEntry; import io.micronaut.context.annotation.ConfigurationProperties; -import lombok.Getter; -import lombok.Setter; - import java.util.List; import java.util.Map; +import lombok.Getter; +import lombok.Setter; +/** + * Akhq properties.
+ */ @Getter @Setter @ConfigurationProperties("ns4kafka.akhq") -public class AkhqClaimProviderControllerConfig { +public class AkhqProperties { private String groupLabel; private Map<AccessControlEntry.ResourceType, String> roles; private List<String> formerRoles; diff --git a/src/main/java/com/michelin/ns4kafka/config/KafkaStoreConfig.java b/src/main/java/com/michelin/ns4kafka/properties/KafkaStoreProperties.java similarity index 78% rename from src/main/java/com/michelin/ns4kafka/config/KafkaStoreConfig.java rename to src/main/java/com/michelin/ns4kafka/properties/KafkaStoreProperties.java index 0d23f3b9..f4b36e4f 100644 --- a/src/main/java/com/michelin/ns4kafka/config/KafkaStoreConfig.java +++ b/src/main/java/com/michelin/ns4kafka/properties/KafkaStoreProperties.java @@ -1,16 +1,18 @@ -package com.michelin.ns4kafka.config; +package com.michelin.ns4kafka.properties; import io.micronaut.context.annotation.ConfigurationProperties; import io.micronaut.core.convert.format.MapFormat; +import java.util.Map; import lombok.Getter; import lombok.Setter; -import java.util.Map; - +/** + * Kafka store properties. + */ @Getter @Setter @ConfigurationProperties("ns4kafka.store.kafka.topics") -public class KafkaStoreConfig { +public class KafkaStoreProperties { private String prefix; private int replicationFactor; diff --git a/src/main/java/com/michelin/ns4kafka/config/KafkaAsyncExecutorConfig.java b/src/main/java/com/michelin/ns4kafka/properties/ManagedClusterProperties.java similarity index 70% rename from src/main/java/com/michelin/ns4kafka/config/KafkaAsyncExecutorConfig.java rename to src/main/java/com/michelin/ns4kafka/properties/ManagedClusterProperties.java index 86060f1e..473442ac 100644 --- a/src/main/java/com/michelin/ns4kafka/config/KafkaAsyncExecutorConfig.java +++ b/src/main/java/com/michelin/ns4kafka/properties/ManagedClusterProperties.java @@ -1,20 +1,22 @@ -package com.michelin.ns4kafka.config; +package com.michelin.ns4kafka.properties; import io.micronaut.context.annotation.ConfigurationProperties; import io.micronaut.context.annotation.EachProperty; import io.micronaut.context.annotation.Parameter; import io.micronaut.core.annotation.Introspected; +import java.util.Map; +import java.util.Properties; import lombok.Getter; import lombok.Setter; import org.apache.kafka.clients.admin.Admin; -import java.util.Map; -import java.util.Properties; - +/** + * Managed cluster properties. + */ @Getter @Setter @EachProperty("ns4kafka.managed-clusters") -public class KafkaAsyncExecutorConfig { +public class ManagedClusterProperties { private String name; private boolean manageTopics; private boolean manageAcls; @@ -23,51 +25,61 @@ public class KafkaAsyncExecutorConfig { private boolean manageConnectors; private KafkaProvider provider; private Properties config; - private Map<String, ConnectConfig> connects; - private RegistryConfig schemaRegistry; + private Map<String, ConnectProperties> connects; + private SchemaRegistryProperties schemaRegistry; private Admin adminClient = null; - public KafkaAsyncExecutorConfig(@Parameter String name) { + public ManagedClusterProperties(@Parameter String name) { this.name = name; } - public KafkaAsyncExecutorConfig(@Parameter String name, @Parameter KafkaProvider provider) { + public ManagedClusterProperties(@Parameter String name, @Parameter KafkaProvider provider) { this.name = name; this.provider = provider; } + /** + * Getter for admin client service.
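The renamed ManagedClusterProperties keeps its lazy admin-client getter, shown just below, which creates the Admin client on first call and caches it. A sketch of what that buys, assuming kafka-clients on the classpath (the bootstrap address is hypothetical):

import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import com.michelin.ns4kafka.properties.ManagedClusterProperties;

class AdminClientSketch {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.put("bootstrap.servers", "localhost:9092");
        ManagedClusterProperties cluster = new ManagedClusterProperties("local",
            ManagedClusterProperties.KafkaProvider.SELF_MANAGED);
        cluster.setConfig(config);                // setter generated by Lombok's @Setter
        Admin first = cluster.getAdminClient();   // Admin.create(config) runs here, on first use
        Admin second = cluster.getAdminClient();  // same cached instance, no second connection
        System.out.println(first == second);      // true
    }
}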
+ * + * @return The admin client + */ + public Admin getAdminClient() { + if (this.adminClient == null) { + this.adminClient = Admin.create(config); + } + + return this.adminClient; + } + + /** + * Kafka provider. + */ + public enum KafkaProvider { + SELF_MANAGED, + CONFLUENT_CLOUD + } + + /** + * Connect properties. + */ @Getter @Setter @Introspected - public static class ConnectConfig { + public static class ConnectProperties { String url; String basicAuthUsername; String basicAuthPassword; } + /** + * Schema registry properties. + */ @Getter @Setter @ConfigurationProperties("schema-registry") - public static class RegistryConfig { + public static class SchemaRegistryProperties { String url; String basicAuthUsername; String basicAuthPassword; } - - public enum KafkaProvider { - SELF_MANAGED, - CONFLUENT_CLOUD - } - - /** - * Getter for admin client service - * @return The admin client - */ - public Admin getAdminClient() { - if (this.adminClient == null) { - this.adminClient = Admin.create(config); - } - - return this.adminClient; - } } diff --git a/src/main/java/com/michelin/ns4kafka/config/SecurityConfig.java b/src/main/java/com/michelin/ns4kafka/properties/SecurityProperties.java similarity index 77% rename from src/main/java/com/michelin/ns4kafka/config/SecurityConfig.java rename to src/main/java/com/michelin/ns4kafka/properties/SecurityProperties.java index 6515cc26..1533523d 100644 --- a/src/main/java/com/michelin/ns4kafka/config/SecurityConfig.java +++ b/src/main/java/com/michelin/ns4kafka/properties/SecurityProperties.java @@ -1,16 +1,18 @@ -package com.michelin.ns4kafka.config; +package com.michelin.ns4kafka.properties; import com.michelin.ns4kafka.security.local.LocalUser; import io.micronaut.context.annotation.ConfigurationProperties; +import java.util.List; import lombok.Getter; import lombok.Setter; -import java.util.List; - +/** + * Security properties. + */ @Getter @Setter @ConfigurationProperties("ns4kafka.security") -public class SecurityConfig { +public class SecurityProperties { private List<LocalUser> localUsers; private String adminGroup; private String aes256EncryptionKey; diff --git a/src/main/java/com/michelin/ns4kafka/repositories/AccessControlEntryRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/AccessControlEntryRepository.java index 1bfdcb9f..6e914c82 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/AccessControlEntryRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/AccessControlEntryRepository.java @@ -1,13 +1,18 @@ package com.michelin.ns4kafka.repositories; import com.michelin.ns4kafka.models.AccessControlEntry; - import java.util.Collection; import java.util.Optional; +/** + * Access control entry repository.
+ */ public interface AccessControlEntryRepository { Collection<AccessControlEntry> findAll(); + Optional<AccessControlEntry> findByName(String namespace, String name); + AccessControlEntry create(AccessControlEntry accessControlEntry); + void delete(AccessControlEntry accessControlEntry); } diff --git a/src/main/java/com/michelin/ns4kafka/repositories/ConnectClusterRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/ConnectClusterRepository.java index 0540b854..37f63299 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/ConnectClusterRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/ConnectClusterRepository.java @@ -1,12 +1,17 @@ package com.michelin.ns4kafka.repositories; import com.michelin.ns4kafka.models.connect.cluster.ConnectCluster; - import java.util.List; +/** + * Repository to manage Kafka Connect clusters. + */ public interface ConnectClusterRepository { List<ConnectCluster> findAll(); + List<ConnectCluster> findAllForCluster(String cluster); + ConnectCluster create(ConnectCluster connectCluster); + void delete(ConnectCluster connectCluster); } diff --git a/src/main/java/com/michelin/ns4kafka/repositories/ConnectorRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/ConnectorRepository.java index 9f783e78..fedc9423 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/ConnectorRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/ConnectorRepository.java @@ -1,26 +1,31 @@ package com.michelin.ns4kafka.repositories; import com.michelin.ns4kafka.models.connector.Connector; - import java.util.List; +/** + * Connector repository interface. + */ public interface ConnectorRepository { /** - * Find all connectors by cluster + * Find all connectors by cluster. + * * @param cluster The cluster * @return The list of connectors */ List<Connector> findAllForCluster(String cluster); /** - * Create a given connector + * Create a given connector. + * * @param connector The connector to create * @return The created connector */ Connector create(Connector connector); /** - * Delete a given connector + * Delete a given connector. + * * @param connector The connector to delete */ void delete(Connector connector); diff --git a/src/main/java/com/michelin/ns4kafka/repositories/NamespaceRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/NamespaceRepository.java index a05e436d..e8b95c66 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/NamespaceRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/NamespaceRepository.java @@ -1,13 +1,18 @@ package com.michelin.ns4kafka.repositories; import com.michelin.ns4kafka.models.Namespace; - import java.util.List; import java.util.Optional; +/** + * Namespace repository. + */ public interface NamespaceRepository { List<Namespace> findAllForCluster(String cluster); + Namespace createNamespace(Namespace namespace); + Optional<Namespace> findByName(String namespace); + void delete(Namespace namespace); } diff --git a/src/main/java/com/michelin/ns4kafka/repositories/ResourceQuotaRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/ResourceQuotaRepository.java index d335e96f..83ae0f90 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/ResourceQuotaRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/ResourceQuotaRepository.java @@ -1,32 +1,38 @@ package com.michelin.ns4kafka.repositories; import com.michelin.ns4kafka.models.quota.ResourceQuota; - import java.util.List; import java.util.Optional; +/** + * Resource quota repository.
+ */ public interface ResourceQuotaRepository { /** - * Find all quotas of all namespaces + * Find all quotas of all namespaces. + * * @return The resource quotas */ List<ResourceQuota> findAll(); /** - * Get resource quota by namespace + * Get resource quota by namespace. + * * @param namespace The namespace used to research * @return The resource quotas associated to the namespace */ Optional<ResourceQuota> findForNamespace(String namespace); /** - * Create a resource quota + * Create a resource quota. + * * @param resourceQuota The resource quota to create */ ResourceQuota create(ResourceQuota resourceQuota); /** - * Delete a resource quota + * Delete a resource quota. + * * @param resourceQuota The resource quota to delete */ void delete(ResourceQuota resourceQuota); diff --git a/src/main/java/com/michelin/ns4kafka/repositories/RoleBindingRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/RoleBindingRepository.java index 2a5eb332..78834a75 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/RoleBindingRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/RoleBindingRepository.java @@ -1,33 +1,39 @@ package com.michelin.ns4kafka.repositories; import com.michelin.ns4kafka.models.RoleBinding; - import java.util.Collection; import java.util.List; +/** + * Role binding repository. + */ public interface RoleBindingRepository { /** - * List role bindings by groups + * List role bindings by groups. + * * @param groups The groups used to research * @return The list of associated role bindings */ List<RoleBinding> findAllForGroups(Collection<String> groups); /** - * List role bindings by namespace + * List role bindings by namespace. + * * @param namespace The namespace used to research * @return The list of associated role bindings */ List<RoleBinding> findAllForNamespace(String namespace); /** - * Create a role binding + * Create a role binding. + * * @param roleBinding The role binding to create */ RoleBinding create(RoleBinding roleBinding); /** - * Delete a role binding + * Delete a role binding. + * * @param roleBinding The role binding to delete */ void delete(RoleBinding roleBinding); diff --git a/src/main/java/com/michelin/ns4kafka/repositories/StreamRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/StreamRepository.java index d2be1140..23d32ca2 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/StreamRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/StreamRepository.java @@ -1,17 +1,12 @@ package com.michelin.ns4kafka.repositories; import com.michelin.ns4kafka.models.KafkaStream; - import java.util.List; +/** + * Stream repository. + */ public interface StreamRepository { - /*** - * - * @param cluster the cluster id - * @return the list of all kafkastreams for this cluster as a KV Map with :
- key : String : KafkaStream Name
- value : KafkaStream : KafkaStream data
- */ List<KafkaStream> findAllForCluster(String cluster); KafkaStream create(KafkaStream stream); diff --git a/src/main/java/com/michelin/ns4kafka/repositories/TopicRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/TopicRepository.java index e2ff6908..0d8d0cbc 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/TopicRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/TopicRepository.java @@ -1,32 +1,38 @@ package com.michelin.ns4kafka.repositories; import com.michelin.ns4kafka.models.Topic; - import java.util.List; +/** + * Topic repository. + */ public interface TopicRepository { /** - * Find all topics + * Find all topics. + * * @return The list of topics */ List<Topic> findAll(); /** - * Find all topics by cluster + * Find all topics by cluster. + * * @param cluster The cluster * @return The list of topics */ List<Topic> findAllForCluster(String cluster); /** - * Create a given topic + * Create a given topic. + * * @param topic The topic to create * @return The created topic */ Topic create(Topic topic); /** - * Delete a given topic + * Delete a given topic. + * * @param topic The topic to delete */ void delete(Topic topic); diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/DelayStartupListener.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/DelayStartupListener.java index dd1eddb0..2150ccdc 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/DelayStartupListener.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/DelayStartupListener.java @@ -3,25 +3,31 @@ import io.micronaut.context.event.ApplicationEventListener; import io.micronaut.context.event.StartupEvent; import jakarta.inject.Inject; -import lombok.extern.slf4j.Slf4j; - import java.util.List; +import lombok.extern.slf4j.Slf4j; +/** + * Delay startup listener. + */ @Slf4j public class DelayStartupListener implements ApplicationEventListener<StartupEvent> { @Inject List<KafkaStore<?>> kafkaStores; + /** + * Wait for KafkaStores to be ready before starting the HTTP listener. + * This is required to avoid serving requests before KafkaStores are ready. + * + * @param event the event to respond to + */ @Override public void onApplicationEvent(StartupEvent event) { - // Micronaut will not start the HTTP listener until all ServerStartupEvent are completed - // We must not serve requests if KafkaStores are not ready.
- while(!kafkaStores.stream().allMatch(KafkaStore::isInitialized)) { + while (!kafkaStores.stream().allMatch(KafkaStore::isInitialized)) { try { Thread.sleep(1000); log.info("Waiting for Kafka store to catch up"); } catch (InterruptedException e) { - log.error("Exception ",e); + log.error("Exception ", e); Thread.currentThread().interrupt(); } kafkaStores.forEach(KafkaStore::reportInitProgress); diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaAccessControlEntryRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaAccessControlEntryRepository.java index 00b2c56f..a66f41cd 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaAccessControlEntryRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaAccessControlEntryRepository.java @@ -2,30 +2,38 @@ import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.repositories.AccessControlEntryRepository; -import io.micronaut.configuration.kafka.annotation.*; +import io.micronaut.configuration.kafka.annotation.KafkaClient; +import io.micronaut.configuration.kafka.annotation.KafkaListener; +import io.micronaut.configuration.kafka.annotation.OffsetReset; +import io.micronaut.configuration.kafka.annotation.OffsetStrategy; +import io.micronaut.configuration.kafka.annotation.Topic; import io.micronaut.context.annotation.Value; import jakarta.inject.Singleton; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.producer.Producer; - import java.util.Collection; import java.util.Optional; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.Producer; +/** + * Access control entry repository. + */ @Singleton @KafkaListener( - offsetReset = OffsetReset.EARLIEST, - groupId = "${ns4kafka.store.kafka.group-id}", - offsetStrategy = OffsetStrategy.DISABLED + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED ) -public class KafkaAccessControlEntryRepository extends KafkaStore<AccessControlEntry> implements AccessControlEntryRepository { - public KafkaAccessControlEntryRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.access-control-entries") String kafkaTopic, - @KafkaClient("access-control-entries-producer") Producer<String, AccessControlEntry> kafkaProducer) { +public class KafkaAccessControlEntryRepository extends KafkaStore<AccessControlEntry> + implements AccessControlEntryRepository { + public KafkaAccessControlEntryRepository( + @Value("${ns4kafka.store.kafka.topics.prefix}.access-control-entries") String kafkaTopic, + @KafkaClient("access-control-entries-producer") Producer<String, AccessControlEntry> kafkaProducer) { super(kafkaTopic, kafkaProducer); } @Override String getMessageKey(AccessControlEntry accessControlEntry) { - return accessControlEntry.getMetadata().getNamespace() + "/" + accessControlEntry.getMetadata().getName(); + return accessControlEntry.getMetadata().getNamespace() + "/" + accessControlEntry.getMetadata().getName(); } @Override @@ -35,16 +43,16 @@ public AccessControlEntry create(AccessControlEntry accessControlEntry) { @Override public void delete(AccessControlEntry accessControlEntry) { - produce(getMessageKey(accessControlEntry),null); + produce(getMessageKey(accessControlEntry), null); } @Override public Optional<AccessControlEntry> findByName(String namespace, String name) { return getKafkaStore().values() - .stream() - .filter(ace -> ace.getMetadata().getNamespace().equals(namespace)) - .filter(ace -> ace.getMetadata().getName().equals(name))
- .findFirst(); + .stream() + .filter(ace -> ace.getMetadata().getNamespace().equals(namespace)) + .filter(ace -> ace.getMetadata().getName().equals(name)) + .findFirst(); } @Topic(value = "${ns4kafka.store.kafka.topics.prefix}.access-control-entries") diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectClusterRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectClusterRepository.java index 9030fd47..30a04ca3 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectClusterRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectClusterRepository.java @@ -2,24 +2,31 @@ import com.michelin.ns4kafka.models.connect.cluster.ConnectCluster; import com.michelin.ns4kafka.repositories.ConnectClusterRepository; -import io.micronaut.configuration.kafka.annotation.*; +import io.micronaut.configuration.kafka.annotation.KafkaClient; +import io.micronaut.configuration.kafka.annotation.KafkaListener; +import io.micronaut.configuration.kafka.annotation.OffsetReset; +import io.micronaut.configuration.kafka.annotation.OffsetStrategy; +import io.micronaut.configuration.kafka.annotation.Topic; import io.micronaut.context.annotation.Value; import jakarta.inject.Singleton; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.producer.Producer; - import java.util.ArrayList; import java.util.List; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.Producer; +/** + * Kafka Connect Cluster repository. + */ @Singleton @KafkaListener( - offsetReset = OffsetReset.EARLIEST, - groupId = "${ns4kafka.store.kafka.group-id}", - offsetStrategy = OffsetStrategy.DISABLED + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED ) public class KafkaConnectClusterRepository extends KafkaStore<ConnectCluster> implements ConnectClusterRepository { - public KafkaConnectClusterRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.connect-workers") String kafkaTopic, - @KafkaClient("connect-workers") Producer<String, ConnectCluster> kafkaProducer) { + public KafkaConnectClusterRepository( + @Value("${ns4kafka.store.kafka.topics.prefix}.connect-workers") String kafkaTopic, + @KafkaClient("connect-workers") Producer<String, ConnectCluster> kafkaProducer) { super(kafkaTopic, kafkaProducer); } @@ -31,8 +38,8 @@ public List<ConnectCluster> findAll() { @Override public List<ConnectCluster> findAllForCluster(String cluster) { return getKafkaStore().values().stream() - .filter(connectCluster -> connectCluster.getMetadata().getCluster().equals(cluster)) - .toList(); + .filter(connectCluster -> connectCluster.getMetadata().getCluster().equals(cluster)) + .toList(); } @Override @@ -42,7 +49,7 @@ public ConnectCluster create(ConnectCluster connectCluster) { @Override public void delete(ConnectCluster connectCluster) { - this.produce(getMessageKey(connectCluster),null); + this.produce(getMessageKey(connectCluster), null); } @Override diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectorRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectorRepository.java index 750bcfb7..38d32d98 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectorRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaConnectorRepository.java @@ -2,20 +2,25 @@ import com.michelin.ns4kafka.models.connector.Connector; import com.michelin.ns4kafka.repositories.ConnectorRepository; -import io.micronaut.configuration.kafka.annotation.*; +import io.micronaut.configuration.kafka.annotation.KafkaClient; +import io.micronaut.configuration.kafka.annotation.KafkaListener; +import io.micronaut.configuration.kafka.annotation.OffsetReset; +import io.micronaut.configuration.kafka.annotation.OffsetStrategy; +import io.micronaut.configuration.kafka.annotation.Topic; import io.micronaut.context.annotation.Value; import jakarta.inject.Singleton; +import java.util.List; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.producer.Producer; -import java.util.List; -import java.util.stream.Collectors; - +/** + * Kafka Connector repository. + */ @Singleton @KafkaListener( - offsetReset = OffsetReset.EARLIEST, - groupId = "${ns4kafka.store.kafka.group-id}", - offsetStrategy = OffsetStrategy.DISABLED + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED ) public class KafkaConnectorRepository extends KafkaStore<Connector> implements ConnectorRepository { public KafkaConnectorRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.connectors") String kafkaTopic, @@ -35,33 +40,36 @@ void receive(ConsumerRecord<String, Connector> record) { } /** - * Create a given connector + * Create a given connector. + * * @param connector The connector to create * @return The created connector */ @Override public Connector create(Connector connector) { - return this.produce(getMessageKey(connector),connector); + return this.produce(getMessageKey(connector), connector); } /** - * Delete a given connector + * Delete a given connector. + * * @param connector The connector to delete */ @Override public void delete(Connector connector) { - this.produce(getMessageKey(connector),null); + this.produce(getMessageKey(connector), null); } /** - * Find all connectors by cluster + * Find all connectors by cluster. + * * @param cluster The cluster * @return The list of connectors */ @Override public List<Connector> findAllForCluster(String cluster) { return getKafkaStore().values().stream() - .filter(connector -> connector.getMetadata().getCluster().equals(cluster)) - .toList(); + .filter(connector -> connector.getMetadata().getCluster().equals(cluster)) + .toList(); } } diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaNamespaceRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaNamespaceRepository.java index 6448a80c..8226c3c6 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaNamespaceRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaNamespaceRepository.java @@ -2,20 +2,26 @@ import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.repositories.NamespaceRepository; -import io.micronaut.configuration.kafka.annotation.*; +import io.micronaut.configuration.kafka.annotation.KafkaClient; +import io.micronaut.configuration.kafka.annotation.KafkaListener; +import io.micronaut.configuration.kafka.annotation.OffsetReset; +import io.micronaut.configuration.kafka.annotation.OffsetStrategy; +import io.micronaut.configuration.kafka.annotation.Topic; import io.micronaut.context.annotation.Value; import jakarta.inject.Singleton; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.producer.Producer; - import java.util.List; import java.util.Optional; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.Producer; +/** + * Kafka Namespace repository.
+ */ @Singleton @KafkaListener( - offsetReset = OffsetReset.EARLIEST, - groupId = "${ns4kafka.store.kafka.group-id}", - offsetStrategy = OffsetStrategy.DISABLED + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED ) public class KafkaNamespaceRepository extends KafkaStore<Namespace> implements NamespaceRepository { @@ -31,12 +37,12 @@ String getMessageKey(Namespace namespace) { @Override public Namespace createNamespace(Namespace namespace) { - return produce(getMessageKey(namespace),namespace); + return produce(getMessageKey(namespace), namespace); } @Override public void delete(Namespace namespace) { - produce(getMessageKey(namespace),null); + produce(getMessageKey(namespace), null); } @Topic(value = "${ns4kafka.store.kafka.topics.prefix}.namespaces") @@ -47,17 +53,17 @@ void receive(ConsumerRecord<String, Namespace> record) { } @Override public List<Namespace> findAllForCluster(String cluster) { return getKafkaStore().values() - .stream() - .filter(namespace -> namespace.getMetadata().getCluster().equals(cluster)) - .toList(); + .stream() + .filter(namespace -> namespace.getMetadata().getCluster().equals(cluster)) + .toList(); } @Override public Optional<Namespace> findByName(String namespace) { return getKafkaStore().values() - .stream() - .filter(ns -> ns.getMetadata().getName().equals(namespace)) - .findFirst(); + .stream() + .filter(ns -> ns.getMetadata().getName().equals(namespace)) + .findFirst(); } } diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaResourceQuotaRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaResourceQuotaRepository.java index d1c02187..245de5d5 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaResourceQuotaRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaResourceQuotaRepository.java @@ -2,30 +2,38 @@ import com.michelin.ns4kafka.models.quota.ResourceQuota; import com.michelin.ns4kafka.repositories.ResourceQuotaRepository; -import io.micronaut.configuration.kafka.annotation.*; +import io.micronaut.configuration.kafka.annotation.KafkaClient; +import io.micronaut.configuration.kafka.annotation.KafkaListener; +import io.micronaut.configuration.kafka.annotation.OffsetReset; +import io.micronaut.configuration.kafka.annotation.OffsetStrategy; +import io.micronaut.configuration.kafka.annotation.Topic; import io.micronaut.context.annotation.Value; import jakarta.inject.Singleton; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.producer.Producer; - import java.util.ArrayList; import java.util.List; import java.util.Optional; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.Producer; +/** + * Kafka Resource Quota repository. + */ @Singleton @KafkaListener( - offsetReset = OffsetReset.EARLIEST, - groupId = "${ns4kafka.store.kafka.group-id}", - offsetStrategy = OffsetStrategy.DISABLED + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED ) public class KafkaResourceQuotaRepository extends KafkaStore<ResourceQuota> implements ResourceQuotaRepository { /** - * Constructor - * @param kafkaTopic The resource quota topic + * Constructor.
+ * + * @param kafkaTopic The resource quota topic * @param kafkaProducer The resource quota producer */ - public KafkaResourceQuotaRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.resource-quotas") String kafkaTopic, - @KafkaClient("resource-quotas") Producer<String, ResourceQuota> kafkaProducer) { + public KafkaResourceQuotaRepository( + @Value("${ns4kafka.store.kafka.topics.prefix}.resource-quotas") String kafkaTopic, + @KafkaClient("resource-quotas") Producer<String, ResourceQuota> kafkaProducer) { super(kafkaTopic, kafkaProducer); } @@ -35,7 +43,8 @@ String getMessageKey(ResourceQuota message) { } /** - * Find all quotas of all namespaces + * Find all quotas of all namespaces. + * * @return The resource quotas */ @Override @@ -44,20 +53,22 @@ public List<ResourceQuota> findAll() { } /** - * Get resource quota by namespace + * Get resource quota by namespace. + * * @param namespace The namespace used to research * @return A resource quota */ @Override public Optional<ResourceQuota> findForNamespace(String namespace) { return getKafkaStore().values() - .stream() - .filter(resourceQuota -> resourceQuota.getMetadata().getNamespace().equals(namespace)) - .findFirst(); + .stream() + .filter(resourceQuota -> resourceQuota.getMetadata().getNamespace().equals(namespace)) + .findFirst(); } /** - * Consume messages from resource quotas topic + * Consume messages from resource quotas topic. + * * @param record The resource quota message */ @Override @@ -67,21 +78,23 @@ void receive(ConsumerRecord<String, ResourceQuota> record) { } /** - * Produce a resource quota message + * Produce a resource quota message. + * * @param resourceQuota The resource quota to create * @return The created resource quota */ @Override public ResourceQuota create(ResourceQuota resourceQuota) { - return produce(getMessageKey(resourceQuota),resourceQuota); + return produce(getMessageKey(resourceQuota), resourceQuota); } /** - * Delete a resource quota message by pushing a tomb stone message + * Delete a resource quota message by pushing a tombstone message. + * * @param resourceQuota The resource quota to delete */ @Override public void delete(ResourceQuota resourceQuota) { - produce(getMessageKey(resourceQuota),null); + produce(getMessageKey(resourceQuota), null); } } diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaRoleBindingRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaRoleBindingRepository.java index e375ec17..8ddde0bb 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaRoleBindingRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaRoleBindingRepository.java @@ -2,35 +2,43 @@ import com.michelin.ns4kafka.models.RoleBinding; import com.michelin.ns4kafka.repositories.RoleBindingRepository; -import io.micronaut.configuration.kafka.annotation.*; +import io.micronaut.configuration.kafka.annotation.KafkaClient; +import io.micronaut.configuration.kafka.annotation.KafkaListener; +import io.micronaut.configuration.kafka.annotation.OffsetReset; +import io.micronaut.configuration.kafka.annotation.OffsetStrategy; +import io.micronaut.configuration.kafka.annotation.Topic; import io.micronaut.context.annotation.Value; import jakarta.inject.Singleton; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.producer.Producer; - import java.util.Collection; import java.util.List; -import java.util.stream.Collectors; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.Producer; +/** + * Kafka Role Binding repository.
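Note that the delete() implementations in these repositories never remove anything directly: they publish a tombstone (the record key with a null value) on the compacted store topic, and log compaction erases the older values for that key. The same pattern with a plain Kafka producer, as a sketch (topic name, key, and broker address are hypothetical):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

class TombstoneSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // Null value = tombstone: after compaction, the entry for this key is gone.
            producer.send(new ProducerRecord<>("ns4kafka.resource-quotas", "my-namespace/my-quota", null));
        }
    }
}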
+ */ @Singleton @KafkaListener( - offsetReset = OffsetReset.EARLIEST, - groupId = "${ns4kafka.store.kafka.group-id}", - offsetStrategy = OffsetStrategy.DISABLED + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED ) public class KafkaRoleBindingRepository extends KafkaStore<RoleBinding> implements RoleBindingRepository { /** - * Constructor - * @param kafkaTopic The role bindings topic + * Constructor. + * + * @param kafkaTopic The role bindings topic * @param kafkaProducer The role bindings kafka producer */ public KafkaRoleBindingRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.role-bindings") String kafkaTopic, - @KafkaClient("role-binding-producer") Producer<String, RoleBinding> kafkaProducer) { + @KafkaClient("role-binding-producer") + Producer<String, RoleBinding> kafkaProducer) { super(kafkaTopic, kafkaProducer); } /** - * Build message key from role binding + * Build message key from role binding. + * * @param roleBinding The role binding used to build the key * @return A key */ @@ -40,7 +48,8 @@ String getMessageKey(RoleBinding roleBinding) { } /** - * Consume messages from role bindings topic + * Consume messages from role bindings topic. + * * @param record The role binding message */ @Override @@ -50,50 +59,54 @@ void receive(ConsumerRecord<String, RoleBinding> record) { } /** - * Produce a role binding message + * Produce a role binding message. + * * @param roleBinding The role binding to create * @return The created role binding */ @Override public RoleBinding create(RoleBinding roleBinding) { - return this.produce(getMessageKey(roleBinding),roleBinding); + return this.produce(getMessageKey(roleBinding), roleBinding); } /** - * Delete a role binding message by pushing a tomb stone message + * Delete a role binding message by pushing a tombstone message. + * * @param roleBinding The role binding to delete */ @Override public void delete(RoleBinding roleBinding) { - this.produce(getMessageKey(roleBinding),null); + this.produce(getMessageKey(roleBinding), null); } /** - * List role bindings by groups + * List role bindings by groups. + * * @param groups The groups used to research * @return The list of associated role bindings */ @Override public List<RoleBinding> findAllForGroups(Collection<String> groups) { return getKafkaStore().values() + .stream() + .filter(roleBinding -> groups .stream() - .filter(roleBinding -> groups - .stream() - .anyMatch(group -> roleBinding.getSpec().getSubject().getSubjectType() == RoleBinding.SubjectType.GROUP - && roleBinding.getSpec().getSubject().getSubjectName().equals(group))) - .toList(); + .anyMatch(group -> roleBinding.getSpec().getSubject().getSubjectType() == RoleBinding.SubjectType.GROUP + && roleBinding.getSpec().getSubject().getSubjectName().equals(group))) + .toList(); } /** - * List role bindings by namespace + * List role bindings by namespace.
+ * * @param namespace The namespace used to research * @return The list of associated role bindings */ @Override public List<RoleBinding> findAllForNamespace(String namespace) { return getKafkaStore().values() - .stream() - .filter(roleBinding -> roleBinding.getMetadata().getNamespace().equals(namespace)) - .toList(); + .stream() + .filter(roleBinding -> roleBinding.getMetadata().getNamespace().equals(namespace)) + .toList(); } } diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStore.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStore.java index 1f1465be..c7335cbd 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStore.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStore.java @@ -1,6 +1,6 @@ package com.michelin.ns4kafka.repositories.kafka; -import com.michelin.ns4kafka.config.KafkaStoreConfig; +import com.michelin.ns4kafka.properties.KafkaStoreProperties; import io.micronaut.context.ApplicationContext; import io.micronaut.context.annotation.Property; import io.micronaut.scheduling.TaskExecutors; @@ -8,6 +8,18 @@ import jakarta.annotation.PostConstruct; import jakarta.inject.Inject; import jakarta.inject.Named; +import java.time.Duration; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.AdminClient; import org.apache.kafka.clients.admin.Config; @@ -22,50 +34,34 @@ import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.errors.TopicExistsException; -import java.time.Duration; -import java.util.Collections; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.*; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.ReentrantLock; - +/** + * Kafka store. + * + * @param <T> The type of the store + */ @Slf4j public abstract class KafkaStore<T> { + private final Map<String, T> store; + private final AtomicBoolean initialized = new AtomicBoolean(false); + private final ReentrantLock offsetUpdateLock; + private final Condition offsetReachedThreshold; @Inject ApplicationContext applicationContext; - @Inject AdminClient adminClient; - @Inject - KafkaStoreConfig kafkaStoreConfig; - + KafkaStoreProperties kafkaStoreProperties; @Inject @Named(TaskExecutors.SCHEDULED) TaskScheduler taskScheduler; - - private final Map<String, T> store; - String kafkaTopic; - - Producer<String, T> kafkaProducer; - + Producer<String, T> kafkaProducer; long offsetInSchemasTopic = -1; - long lastWrittenOffset = -1; - - private final AtomicBoolean initialized = new AtomicBoolean(false); - - private final ReentrantLock offsetUpdateLock; - - private final Condition offsetReachedThreshold; - @Property(name = "ns4kafka.store.kafka.init-timeout") int initTimeout; - KafkaStore(String kafkaTopic, Producer<String, T> kafkaProducer){ + KafkaStore(String kafkaTopic, Producer<String, T> kafkaProducer) { this.kafkaTopic = kafkaTopic; this.kafkaProducer = kafkaProducer; this.store = new ConcurrentHashMap<>(); @@ -74,14 +70,16 @@ public abstract class KafkaStore { } /** - * Get message key + * Get message key.
+ * + * @param message The message * @return The key of the message */ abstract String getMessageKey(T message); /** - * Create or verify the internal topic + * Create or verify the internal topic. + * * @throws KafkaStoreException Exception thrown during internal topic creation or verification */ @PostConstruct @@ -91,14 +89,15 @@ private void createOrVerifyTopic() throws KafkaStoreException { } /** - * Create or verify the internal kafka topic + * Create or verify the internal kafka topic. + * * @throws KafkaStoreException Exception thrown during internal topic creation or verification */ private void createOrVerifyInternalTopic() throws KafkaStoreException { try { Set<String> allTopics = adminClient.listTopics() - .names() - .get(initTimeout, TimeUnit.MILLISECONDS); + .names() + .get(initTimeout, TimeUnit.MILLISECONDS); if (allTopics.contains(kafkaTopic)) { verifyInternalTopic(); @@ -107,94 +106,105 @@ } } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new KafkaStoreException("Thread interrupted trying to create or validate configuration of topic " + kafkaTopic + ".", e); + throw new KafkaStoreException( + "Thread interrupted trying to create or validate configuration of topic " + kafkaTopic + ".", e); } catch (ExecutionException e) { - throw new KafkaStoreException("Execution error trying to create or validate configuration of topic " + kafkaTopic + ".", e); + throw new KafkaStoreException( + "Execution error trying to create or validate configuration of topic " + kafkaTopic + ".", e); } catch (TimeoutException e) { - throw new KafkaStoreException("Timed out trying to create or validate configuration of topic " + kafkaTopic + ".", e); + throw new KafkaStoreException( + "Timed out trying to create or validate configuration of topic " + kafkaTopic + ".", e); } } /** - * Verify the internal topic - * @throws KafkaStoreException Exception thrown during internal topic verification + * Verify the internal topic.
+ * + * @throws KafkaStoreException  Exception thrown during internal topic verification * @throws InterruptedException Exception thrown during internal topic verification - * @throws ExecutionException Exception thrown during internal topic verification - * @throws TimeoutException Exception thrown during internal topic verification + * @throws ExecutionException  Exception thrown during internal topic verification + * @throws TimeoutException  Exception thrown during internal topic verification */ - private void verifyInternalTopic() throws KafkaStoreException, InterruptedException, ExecutionException, TimeoutException { + private void verifyInternalTopic() + throws KafkaStoreException, InterruptedException, ExecutionException, TimeoutException { log.info("Validating topic {}.", kafkaTopic); Set<String> topics = Collections.singleton(kafkaTopic); Map<String, TopicDescription> topicDescription = adminClient.describeTopics(topics) - .all() - .get(initTimeout, TimeUnit.MILLISECONDS); + .all() + .get(initTimeout, TimeUnit.MILLISECONDS); TopicDescription description = topicDescription.get(kafkaTopic); final int numPartitions = description.partitions().size(); if (numPartitions != 1) { - throw new KafkaStoreException("The topic " + kafkaTopic + " should have only 1 partition but has " + numPartitions + "."); + throw new KafkaStoreException( + "The topic " + kafkaTopic + " should have only 1 partition but has " + numPartitions + "."); } - if (description.partitions().get(0).replicas().size() < kafkaStoreConfig.getReplicationFactor() && log.isWarnEnabled()) { + if (description.partitions().get(0).replicas().size() < kafkaStoreProperties.getReplicationFactor() + && log.isWarnEnabled()) { log.warn("The replication factor of the topic " + kafkaTopic + " is less than the desired one of " - + kafkaStoreConfig.getReplicationFactor() + ". If this is a production environment, it's crucial to add more brokers and " - + "increase the replication factor of the topic."); + + kafkaStoreProperties.getReplicationFactor() + + ". If this is a production environment, it's crucial to add more brokers and " + + "increase the replication factor of the topic."); } ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, kafkaTopic); Map<ConfigResource, Config> configs = adminClient.describeConfigs(Collections.singleton(topicResource)) - .all() - .get(initTimeout, TimeUnit.MILLISECONDS); + .all() + .get(initTimeout, TimeUnit.MILLISECONDS); Config topicConfigs = configs.get(topicResource); String retentionPolicy = topicConfigs.get(TopicConfig.CLEANUP_POLICY_CONFIG).value(); if (!TopicConfig.CLEANUP_POLICY_COMPACT.equals(retentionPolicy)) { if (log.isErrorEnabled()) { log.error("The retention policy of the topic " + kafkaTopic + " is incorrect. " - + "You must configure the topic to 'compact' cleanup policy to avoid Kafka " - + "deleting your data after a week. Refer to Kafka documentation for more details on cleanup policies"); + + "You must configure the topic to 'compact' cleanup policy to avoid Kafka " + + "deleting your data after a week. Refer to Kafka documentation for more details " + + "on cleanup policies"); } throw new KafkaStoreException("The retention policy of the schema kafkaTopic " + kafkaTopic - + " is incorrect. Expected cleanup.policy to be 'compact' but it is " + retentionPolicy); + + " is incorrect. Expected cleanup.policy to be 'compact' but it is " + retentionPolicy); } } /** - * Create the internal topic - * @throws KafkaStoreException Exception thrown during internal topic creation + * Create the internal topic.
+ * + * @throws KafkaStoreException  Exception thrown during internal topic creation * @throws InterruptedException Exception thrown during internal topic creation - * @throws ExecutionException Exception thrown during internal topic creation - * @throws TimeoutException Exception thrown during internal topic creation + * @throws ExecutionException  Exception thrown during internal topic creation + * @throws TimeoutException  Exception thrown during internal topic creation */ - private void createInternalTopic() throws KafkaStoreException, InterruptedException, ExecutionException, TimeoutException { + private void createInternalTopic() + throws KafkaStoreException, InterruptedException, ExecutionException, TimeoutException { log.info("Creating topic {}.", kafkaTopic); int numLiveBrokers = adminClient.describeCluster() - .nodes() - .get(initTimeout, TimeUnit.MILLISECONDS).size(); + .nodes() + .get(initTimeout, TimeUnit.MILLISECONDS).size(); if (numLiveBrokers <= 0) { throw new KafkaStoreException("No live Kafka brokers."); } - int schemaTopicReplicationFactor = Math.min(numLiveBrokers, kafkaStoreConfig.getReplicationFactor()); - if (schemaTopicReplicationFactor < kafkaStoreConfig.getReplicationFactor() && log.isWarnEnabled()) { + int schemaTopicReplicationFactor = Math.min(numLiveBrokers, kafkaStoreProperties.getReplicationFactor()); + if (schemaTopicReplicationFactor < kafkaStoreProperties.getReplicationFactor() && log.isWarnEnabled()) { log.warn("Creating the kafkaTopic {}" + kafkaTopic + " using a replication factor of " - + schemaTopicReplicationFactor + ", which is less than the desired one of " - + kafkaStoreConfig.getReplicationFactor() + ". If this is a production environment, it's " - + "crucial to add more brokers and increase the replication factor of the kafkaTopic."); + + schemaTopicReplicationFactor + ", which is less than the desired one of " + + kafkaStoreProperties.getReplicationFactor() + ". If this is a production environment, it's " + + "crucial to add more brokers and increase the replication factor of the kafkaTopic."); } NewTopic schemaTopicRequest = new NewTopic(kafkaTopic, 1, (short) schemaTopicReplicationFactor); - schemaTopicRequest.configs(kafkaStoreConfig.getProps()); + schemaTopicRequest.configs(kafkaStoreProperties.getProps()); try { adminClient.createTopics(Collections.singleton(schemaTopicRequest)) - .all() - .get(initTimeout, TimeUnit.MILLISECONDS); + .all() + .get(initTimeout, TimeUnit.MILLISECONDS); } catch (ExecutionException e) { if (e.getCause() instanceof TopicExistsException) { verifyInternalTopic(); @@ -205,16 +215,18 @@ } /** - * Get the current Kafka store + * Get the current Kafka store. + * * @return The Kafka store */ - public Map<String, T> getKafkaStore() { + public Map<String, T> getKafkaStore() { return store; } /** - * Produce a new record - * @param key The record key + * Produce a new record.
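The produce() path that follows writes one record and blocks on the broker acknowledgement, so a caller that returns knows its write is in the store topic. The same send-and-wait pattern in isolation, as a sketch (names are illustrative, not part of this class):

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

final class SyncSendSketch {
    // Send one record and wait for the ack, like KafkaStore#produce below.
    static RecordMetadata sendAndWait(Producer<String, String> producer, String topic,
                                      String key, String value, long timeoutMs) throws Exception {
        Future<RecordMetadata> ack = producer.send(new ProducerRecord<>(topic, key, value));
        return ack.get(timeoutMs, TimeUnit.MILLISECONDS); // throws TimeoutException if the broker is slow
    }
}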
+ * + * @param key The record key * @param message The record body * @return The produced record * @throws KafkaStoreException Exception thrown during the send process @@ -226,7 +238,7 @@ T produce(String key, T message) throws KafkaStoreException { boolean knownSuccessfulWrite = false; try { - ProducerRecord producerRecord = new ProducerRecord<>(kafkaTopic, key, message); + ProducerRecord producerRecord = new ProducerRecord<>(kafkaTopic, key, message); log.trace("Sending record to topic {}", producerRecord); Future ack = kafkaProducer.send(producerRecord); RecordMetadata recordMetadata = ack.get(initTimeout, TimeUnit.MILLISECONDS); @@ -255,6 +267,7 @@ T produce(String key, T message) throws KafkaStoreException { /** * Handle a new consumed record * See: /core/src/main/java/io/confluent/kafka/schemaregistry/storage/KafkaStoreReaderThread.java#L326 + * * @param message The record */ void receive(ConsumerRecord message) { @@ -281,6 +294,10 @@ void receive(ConsumerRecord message) { } } + /** + * Wait until the Kafka reader reaches the last offset. + * Mark the store as initialized when it is done. + */ public void waitUntilKafkaReaderReachesLastOffsetInit() { try { waitUntilOffset(getLatestOffset(), TimeUnit.MILLISECONDS); @@ -288,13 +305,14 @@ public void waitUntilKafkaReaderReachesLastOffsetInit() { if (!isInitialized) { throw new KafkaStoreException("Illegal state while initializing store. Store was already initialized"); } - } catch (Exception e){ + } catch (Exception e) { log.error("Unrecoverable error during initialization", e); } } /** - * Get latest offset + * Get latest offset. + * * @return The latest offset * @throws KafkaStoreException Exception while getting the latest offset */ @@ -305,26 +323,30 @@ private long getLatestOffset() throws KafkaStoreException { try { log.trace("Sending NOOP record to topic {} to find last offset.", kafkaTopic); - Future ack = kafkaProducer.send(new ProducerRecord<>(kafkaTopic,"NOOP",null)); + Future ack = kafkaProducer.send(new ProducerRecord<>(kafkaTopic, "NOOP", null)); RecordMetadata metadata = ack.get(initTimeout, TimeUnit.MILLISECONDS); this.lastWrittenOffset = metadata.offset(); log.trace("NOOP record's offset is {}", lastWrittenOffset); return lastWrittenOffset; } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new KafkaStoreException("Thread interrupted while waiting for the latest offset of topic " + kafkaTopic + ".", e); + throw new KafkaStoreException( + "Thread interrupted while waiting for the latest offset of topic " + kafkaTopic + ".", e); } catch (ExecutionException e) { - throw new KafkaStoreException("Execution error while waiting for the latest offset of topic " + kafkaTopic + ".", e); + throw new KafkaStoreException( + "Execution error while waiting for the latest offset of topic " + kafkaTopic + ".", e); } catch (TimeoutException e) { - throw new KafkaStoreException("Timeout while waiting for the latest offset of topic " + kafkaTopic + ".", e); + throw new KafkaStoreException("Timeout while waiting for the latest offset of topic " + kafkaTopic + ".", + e); } catch (Exception e) { throw new KafkaStoreException("Error while waiting for the latest offset of topic " + kafkaTopic + ".", e); } } /** - * Wait until the given offset is read - * @param offset The offset + * Wait until the given offset is read. 
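The getLatestOffset trick above is worth spelling out: because the store topic has a single partition, producing a marker record and reading back its RecordMetadata offset yields the current end of the log, and the store is initialized once the reader has replayed up to that offset. A self-contained sketch, again assuming a local broker and an illustrative topic name:

```java
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class NoopOffsetProbe {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // A null value is a tombstone: compaction will eventually drop it,
            // so the probe leaves no lasting state behind.
            RecordMetadata metadata = producer
                .send(new ProducerRecord<>("ns4kafka.topics", "NOOP", null))
                .get(30, TimeUnit.SECONDS);
            System.out.println("Latest offset: " + metadata.offset());
        }
    }
}
```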
+ * + * @param offset The offset * @param timeUnit The time unit to wait * @throws KafkaStoreException Exception thrown during the wait process */ @@ -343,8 +365,9 @@ public void waitUntilOffset(long offset, TimeUnit timeUnit) throws KafkaStoreExc timeoutNs = offsetReachedThreshold.awaitNanos(timeoutNs); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - log.debug("Interrupted while waiting for the background store reader thread to reach the specified offset: {}", - offset, e); + log.debug("Interrupted while waiting for the background store reader thread " + + "to reach the specified offset: {}", + offset, e); } } } finally { @@ -353,21 +376,22 @@ public void waitUntilOffset(long offset, TimeUnit timeUnit) throws KafkaStoreExc if (offsetInSchemasTopic < offset) { throw new KafkaStoreException("Failed to reach target offset within the timeout interval. targetOffset: " - + offset + ", offsetReached: " + offsetInSchemasTopic + ", timeout(ms): " - + TimeUnit.MILLISECONDS.convert(initTimeout, timeUnit)); + + offset + ", offsetReached: " + offsetInSchemasTopic + ", timeout(ms): " + + TimeUnit.MILLISECONDS.convert(initTimeout, timeUnit)); } } /** - * Is the store initialized + * Is the store initialized. + * * @return true if it is, false otherwise */ - public boolean isInitialized(){ + public boolean isInitialized() { return initialized.get(); } /** - * Report the init process + * Report the init process. */ public void reportInitProgress() { if (isInitialized()) { diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStoreException.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStoreException.java index bc545b17..6e5c204b 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStoreException.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStoreException.java @@ -1,5 +1,8 @@ package com.michelin.ns4kafka.repositories.kafka; +/** + * Kafka Store Exception. + */ public class KafkaStoreException extends RuntimeException { public KafkaStoreException(String message, Throwable cause) { diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStreamRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStreamRepository.java index 481d58d0..7384dbcd 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStreamRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaStreamRepository.java @@ -2,20 +2,25 @@ import com.michelin.ns4kafka.models.KafkaStream; import com.michelin.ns4kafka.repositories.StreamRepository; -import io.micronaut.configuration.kafka.annotation.*; +import io.micronaut.configuration.kafka.annotation.KafkaClient; +import io.micronaut.configuration.kafka.annotation.KafkaListener; +import io.micronaut.configuration.kafka.annotation.OffsetReset; +import io.micronaut.configuration.kafka.annotation.OffsetStrategy; +import io.micronaut.configuration.kafka.annotation.Topic; import io.micronaut.context.annotation.Value; import jakarta.inject.Singleton; +import java.util.List; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.producer.Producer; -import java.util.List; -import java.util.stream.Collectors; - +/** + * Kafka Stream repository. 
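waitUntilOffset builds on a classic lock/condition handshake between the background reader thread and blocked callers. A condensed sketch of that pattern with illustrative field and method names (the real class carries more state than this):

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class OffsetWaiter {
    private final ReentrantLock offsetUpdateLock = new ReentrantLock();
    private final Condition offsetReachedThreshold = offsetUpdateLock.newCondition();
    private long offsetInTopic = -1;

    /** Called by the consumer thread after applying each record. */
    public void recordApplied(long offset) {
        offsetUpdateLock.lock();
        try {
            offsetInTopic = offset;
            offsetReachedThreshold.signalAll();
        } finally {
            offsetUpdateLock.unlock();
        }
    }

    /** Blocks until the reader has reached {@code offset} or the timeout elapses. */
    public boolean waitUntilOffset(long offset, long timeout, TimeUnit unit)
        throws InterruptedException {
        long timeoutNs = unit.toNanos(timeout);
        offsetUpdateLock.lock();
        try {
            // awaitNanos returns the remaining wait time, so looping guards
            // against spurious wake-ups without extending the deadline.
            while (offsetInTopic < offset && timeoutNs > 0) {
                timeoutNs = offsetReachedThreshold.awaitNanos(timeoutNs);
            }
            return offsetInTopic >= offset;
        } finally {
            offsetUpdateLock.unlock();
        }
    }
}
```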
+ */ @Singleton @KafkaListener( - offsetReset = OffsetReset.EARLIEST, - groupId = "${ns4kafka.store.kafka.group-id}", - offsetStrategy = OffsetStrategy.DISABLED + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED ) public class KafkaStreamRepository extends KafkaStore implements StreamRepository { @@ -26,15 +31,15 @@ public KafkaStreamRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.strea @Override String getMessageKey(KafkaStream stream) { - return stream.getMetadata().getCluster()+"/"+ stream.getMetadata().getName(); + return stream.getMetadata().getCluster() + "/" + stream.getMetadata().getName(); } @Override public List findAllForCluster(String cluster) { return getKafkaStore().values() - .stream() - .filter(stream -> stream.getMetadata().getCluster().equals(cluster)) - .toList(); + .stream() + .filter(stream -> stream.getMetadata().getCluster().equals(cluster)) + .toList(); } @Override @@ -49,7 +54,7 @@ void receive(ConsumerRecord record) { @Override public void delete(KafkaStream stream) { - this.produce(getMessageKey(stream),null); + this.produce(getMessageKey(stream), null); } } diff --git a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaTopicRepository.java b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaTopicRepository.java index d9a1432c..d2db782e 100644 --- a/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaTopicRepository.java +++ b/src/main/java/com/michelin/ns4kafka/repositories/kafka/KafkaTopicRepository.java @@ -8,33 +8,35 @@ import io.micronaut.configuration.kafka.annotation.OffsetStrategy; import io.micronaut.context.annotation.Value; import jakarta.inject.Singleton; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.producer.Producer; - import java.util.ArrayList; import java.util.List; -import java.util.stream.Collectors; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.Producer; +/** + * Kafka Topic repository. + */ @Singleton @KafkaListener( - offsetReset = OffsetReset.EARLIEST, - groupId = "${ns4kafka.store.kafka.group-id}", - offsetStrategy = OffsetStrategy.DISABLED + offsetReset = OffsetReset.EARLIEST, + groupId = "${ns4kafka.store.kafka.group-id}", + offsetStrategy = OffsetStrategy.DISABLED ) public class KafkaTopicRepository extends KafkaStore implements TopicRepository { public KafkaTopicRepository(@Value("${ns4kafka.store.kafka.topics.prefix}.topics") String kafkaTopic, - @KafkaClient("topics-producer") Producer kafkaProducer) { + @KafkaClient("topics-producer") Producer kafkaProducer) { super(kafkaTopic, kafkaProducer); } @Override String getMessageKey(Topic topic) { - return topic.getMetadata().getCluster()+"/"+topic.getMetadata().getName(); + return topic.getMetadata().getCluster() + "/" + topic.getMetadata().getName(); } /** - * Create a given topic + * Create a given topic. + * * @param topic The topic to create * @return The created topic */ @@ -44,12 +46,13 @@ public Topic create(Topic topic) { } /** - * Delete a given topic + * Delete a given topic. + * * @param topic The topic to delete */ @Override public void delete(Topic topic) { - this.produce(getMessageKey(topic),null); + this.produce(getMessageKey(topic), null); } @io.micronaut.configuration.kafka.annotation.Topic(value = "${ns4kafka.store.kafka.topics.prefix}.topics") @@ -58,7 +61,8 @@ void receive(ConsumerRecord record) { } /** - * Find all topics + * Find all topics. 
+ * * @return The list of topics */ @Override @@ -67,15 +71,16 @@ public List findAll() { } /** - * Find all topics by cluster + * Find all topics by cluster. + * * @param cluster The cluster * @return The list of topics */ @Override public List findAllForCluster(String cluster) { return getKafkaStore().values() - .stream() - .filter(topic -> topic.getMetadata().getCluster().equals(cluster)) - .toList(); + .stream() + .filter(topic -> topic.getMetadata().getCluster().equals(cluster)) + .toList(); } } diff --git a/src/main/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRule.java b/src/main/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRule.java index 33b64adc..d31c21f3 100644 --- a/src/main/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRule.java +++ b/src/main/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRule.java @@ -1,7 +1,7 @@ package com.michelin.ns4kafka.security; -import com.michelin.ns4kafka.config.SecurityConfig; import com.michelin.ns4kafka.models.RoleBinding; +import com.michelin.ns4kafka.properties.SecurityProperties; import com.michelin.ns4kafka.repositories.NamespaceRepository; import com.michelin.ns4kafka.repositories.RoleBindingRepository; import io.micronaut.core.annotation.Nullable; @@ -11,27 +11,30 @@ import io.micronaut.security.authentication.Authentication; import io.micronaut.security.rules.SecurityRule; import io.micronaut.security.rules.SecurityRuleResult; -import io.micronaut.web.router.RouteMatch; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import org.reactivestreams.Publisher; - import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; +import lombok.extern.slf4j.Slf4j; +import org.reactivestreams.Publisher; +/** + * Security rule to check if a user can access a given URL. + */ @Slf4j @Singleton public class ResourceBasedSecurityRule implements SecurityRule> { public static final String IS_ADMIN = "isAdmin()"; - private final Pattern namespacedResourcePattern = Pattern.compile("^\\/api\\/namespaces\\/(?[a-zA-Z0-9_-]+)\\/(?[a-z_-]+)(\\/([a-zA-Z0-9_.-]+)(\\/(?[a-z-]+))?)?$"); + private final Pattern namespacedResourcePattern = Pattern.compile( + "^\\/api\\/namespaces\\/(?[a-zA-Z0-9_-]+)" + + "\\/(?[a-z_-]+)(\\/([a-zA-Z0-9_.-]+)(\\/(?[a-z-]+))?)?$"); @Inject - SecurityConfig securityConfig; + SecurityProperties securityProperties; @Inject RoleBindingRepository roleBindingRepository; @@ -40,13 +43,15 @@ public class ResourceBasedSecurityRule implements SecurityRule> { NamespaceRepository namespaceRepository; @Override - public Publisher check(@Nullable HttpRequest request, @Nullable Authentication authentication) { + public Publisher check(@Nullable HttpRequest request, + @Nullable Authentication authentication) { return Publishers.just(checkSecurity(request, authentication)); } /** - * Check a user can access a given URL - * @param request The current request + * Check a user can access a given URL. + * + * @param request The current request * @param authentication The claims from the token * @return A security rule allowing the user or not */ @@ -55,19 +60,15 @@ public SecurityRuleResult checkSecurity(HttpRequest request, @Nullable Authen return SecurityRuleResult.UNKNOWN; } - if (!authentication.getAttributes().keySet().containsAll( List.of("groups", "sub", "roles"))) { - log.debug("No authentication available for path [{}]. 
Returning unknown.",request.getPath()); + if (!authentication.getAttributes().keySet().containsAll(List.of("groups", "sub", "roles"))) { + log.debug("No authentication available for path [{}]. Returning unknown.", request.getPath()); return SecurityRuleResult.UNKNOWN; } - String sub = authentication.getName(); - List groups = (List) authentication.getAttributes().get("groups"); - Collection roles = authentication.getRoles(); - // Request to a URL that is not in the scope of this SecurityRule Matcher matcher = namespacedResourcePattern.matcher(request.getPath()); if (!matcher.find()) { - log.debug("Invalid namespaced resource for path [{}]. Returning unknown.",request.getPath()); + log.debug("Invalid namespaced resource for path [{}]. Returning unknown.", request.getPath()); return SecurityRuleResult.UNKNOWN; } @@ -83,38 +84,42 @@ public SecurityRuleResult checkSecurity(HttpRequest request, @Nullable Authen } // Namespace doesn't exist + String sub = authentication.getName(); if (namespaceRepository.findByName(namespace).isEmpty()) { - log.debug("Namespace not found for user [{}] on path [{}]. Returning unknown.",sub,request.getPath()); + log.debug("Namespace not found for user [{}] on path [{}]. Returning unknown.", sub, request.getPath()); return SecurityRuleResult.UNKNOWN; } // Admin are allowed everything (provided that the namespace exists) + Collection roles = authentication.getRoles(); if (roles.contains(IS_ADMIN)) { - log.debug("Authorized admin user [{}] on path [{}]. Returning ALLOWED.",sub,request.getPath()); + log.debug("Authorized admin user [{}] on path [{}]. Returning ALLOWED.", sub, request.getPath()); return SecurityRuleResult.ALLOWED; } // Collect all roleBindings for this user + List groups = (List) authentication.getAttributes().get("groups"); Collection roleBindings = roleBindingRepository.findAllForGroups(groups); List authorizedRoleBindings = roleBindings.stream() - .filter(roleBinding -> roleBinding.getMetadata().getNamespace().equals(namespace)) - .filter(roleBinding -> roleBinding.getSpec().getRole().getResourceTypes().contains(resourceType)) - .filter(roleBinding -> roleBinding.getSpec().getRole().getVerbs() - .stream() - .map(Enum::name) - .toList() - .contains(request.getMethodName())) - .toList(); + .filter(roleBinding -> roleBinding.getMetadata().getNamespace().equals(namespace)) + .filter(roleBinding -> roleBinding.getSpec().getRole().getResourceTypes().contains(resourceType)) + .filter(roleBinding -> roleBinding.getSpec().getRole().getVerbs() + .stream() + .map(Enum::name) + .toList() + .contains(request.getMethodName())) + .toList(); // User not authorized to access requested resource if (authorizedRoleBindings.isEmpty()) { - log.debug("No matching RoleBinding for user [{}] on path [{}]. Returning unknown.",sub,request.getPath()); + log.debug("No matching RoleBinding for user [{}] on path [{}]. Returning unknown.", sub, request.getPath()); return SecurityRuleResult.UNKNOWN; } if (log.isDebugEnabled()) { - authorizedRoleBindings.forEach(roleBinding -> log.debug("Found matching RoleBinding : {}", roleBinding.toString())); - log.debug("Authorized user [{}] on path [{}]",sub,request.getPath()); + authorizedRoleBindings.forEach( + roleBinding -> log.debug("Found matching RoleBinding : {}", roleBinding.toString())); + log.debug("Authorized user [{}] on path [{}]", sub, request.getPath()); } return SecurityRuleResult.ALLOWED; @@ -125,10 +130,16 @@ public int getOrder() { return -1000; } + /** + * Compute roles from groups. 
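Note that the namespacedResourcePattern in this diff relies on named capturing groups, which is how the namespace and resource type checked above are pulled out of the request path. A sketch of that extraction; the group names namespace, resourceType, and action are inferred from the surrounding code, not confirmed by this diff:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class NamespacedPathMatcher {
    // Assumed reconstruction of the rule's pattern; unescaped '/' is
    // equivalent to the '\/' form in the source.
    private static final Pattern NAMESPACED = Pattern.compile(
        "^/api/namespaces/(?<namespace>[a-zA-Z0-9_-]+)"
            + "/(?<resourceType>[a-z_-]+)(/([a-zA-Z0-9_.-]+)(/(?<action>[a-z-]+))?)?$");

    public static void main(String[] args) {
        Matcher matcher = NAMESPACED.matcher("/api/namespaces/team1/topics/my-topic/delete-records");
        if (matcher.find()) {
            System.out.println(matcher.group("namespace"));    // team1
            System.out.println(matcher.group("resourceType")); // topics
            System.out.println(matcher.group("action"));       // delete-records
        }
    }
}
```

Any path that fails to match falls out of the rule's scope, which is why the method above returns UNKNOWN rather than rejecting outright.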
+     *
+     * @param groups The groups
+     * @return The roles
+     */
     public List<String> computeRolesFromGroups(List<String> groups) {
         List<String> roles = new ArrayList<>();

-        if (groups.contains(securityConfig.getAdminGroup())) {
+        if (groups.contains(securityProperties.getAdminGroup())) {
             roles.add(ResourceBasedSecurityRule.IS_ADMIN);
         }

diff --git a/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabApiClient.java b/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabApiClient.java
index 34df8865..d8cee737 100644
--- a/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabApiClient.java
+++ b/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabApiClient.java
@@ -4,28 +4,33 @@
 import io.micronaut.http.annotation.Get;
 import io.micronaut.http.annotation.Header;
 import io.micronaut.http.client.annotation.Client;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
 import java.util.List;
 import java.util.Map;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;

+/**
+ * Gitlab API client.
+ */
 @Client("${micronaut.security.gitlab.url}")
 public interface GitlabApiClient {
     /**
-     * Get user groups
+     * Get user groups.
+     *
      * @param token The user token
-     * @param page The current page to fetch groups
+     * @param page  The current page to fetch groups
      * @return The groups
      */
     @Get("/api/v4/groups?min_access_level=10&sort=asc&page={page}&per_page=100")
-    Flux<HttpResponse<List<Map<String, Object>>>> getGroupsPage(@Header(value = "PRIVATE-TOKEN") String token, int page);
+    Flux<HttpResponse<List<Map<String, Object>>>> getGroupsPage(@Header(value = "PRIVATE-TOKEN") String token,
+                                                                int page);

     /**
-     * Find a user by given token
+     * Find a user by given token.
+     *
      * @param token The user token
      * @return The user information
      */
     @Get("/api/v4/user")
-    Mono<Map<String, Object>> findUser(@Header(value = "PRIVATE-TOKEN") String token);
+    Mono<Map<String, Object>> findUser(@Header(value = "PRIVATE-TOKEN") String token);
 }
diff --git a/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationProvider.java b/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationProvider.java
index d6568d9f..01ce5504 100644
--- a/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationProvider.java
+++ b/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationProvider.java
@@ -1,20 +1,25 @@
 package com.michelin.ns4kafka.security.gitlab;

-import com.michelin.ns4kafka.config.SecurityConfig;
+import com.michelin.ns4kafka.properties.SecurityProperties;
 import com.michelin.ns4kafka.security.ResourceBasedSecurityRule;
 import com.michelin.ns4kafka.services.RoleBindingService;
 import io.micronaut.core.annotation.Nullable;
 import io.micronaut.http.HttpRequest;
-import io.micronaut.security.authentication.*;
+import io.micronaut.security.authentication.AuthenticationException;
+import io.micronaut.security.authentication.AuthenticationFailed;
+import io.micronaut.security.authentication.AuthenticationProvider;
+import io.micronaut.security.authentication.AuthenticationRequest;
+import io.micronaut.security.authentication.AuthenticationResponse;
 import jakarta.inject.Inject;
 import jakarta.inject.Singleton;
+import java.util.Map;
 import lombok.extern.slf4j.Slf4j;
 import org.reactivestreams.Publisher;
 import reactor.core.publisher.Mono;

-import java.util.Map;
-
-
+/**
+ * Gitlab authentication provider.
+ */ @Slf4j @Singleton public class GitlabAuthenticationProvider implements AuthenticationProvider> { @@ -28,33 +33,40 @@ public class GitlabAuthenticationProvider implements AuthenticationProvider authenticate(@Nullable HttpRequest httpRequest, AuthenticationRequest authenticationRequest) { + public Publisher authenticate(@Nullable HttpRequest httpRequest, + AuthenticationRequest authenticationRequest) { String token = authenticationRequest.getSecret().toString(); log.debug("Checking authentication with token {}", token); return gitlabAuthenticationService.findUsername(token) - .onErrorResume(error -> Mono.error(new AuthenticationException(new AuthenticationFailed("Bad GitLab token")))) - .flatMap(username -> gitlabAuthenticationService.findAllGroups(token).collectList() - .onErrorResume(error -> Mono.error(new AuthenticationException(new AuthenticationFailed("Cannot retrieve your GitLab groups")))) - .flatMap(groups -> { - if (roleBindingService.listByGroups(groups).isEmpty() && !groups.contains(securityConfig.getAdminGroup())) { - log.debug("Error during authentication: user groups not found in any namespace"); - return Mono.error(new AuthenticationException(new AuthenticationFailed("No namespace matches your GitLab groups"))); - } else { - return Mono.just(AuthenticationResponse.success(username, resourceBasedSecurityRule.computeRolesFromGroups(groups), - Map.of("groups", groups))); - } - })); + .onErrorResume( + error -> Mono.error(new AuthenticationException(new AuthenticationFailed("Bad GitLab token")))) + .flatMap(username -> gitlabAuthenticationService.findAllGroups(token).collectList() + .onErrorResume(error -> Mono.error( + new AuthenticationException(new AuthenticationFailed("Cannot retrieve your GitLab groups")))) + .flatMap(groups -> { + if (roleBindingService.listByGroups(groups).isEmpty() + && !groups.contains(securityProperties.getAdminGroup())) { + log.debug("Error during authentication: user groups not found in any namespace"); + return Mono.error(new AuthenticationException( + new AuthenticationFailed("No namespace matches your GitLab groups"))); + } else { + return Mono.just(AuthenticationResponse.success(username, + resourceBasedSecurityRule.computeRolesFromGroups(groups), + Map.of("groups", groups))); + } + })); } } diff --git a/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationService.java b/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationService.java index 3f892f80..f11e6cca 100644 --- a/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationService.java +++ b/src/main/java/com/michelin/ns4kafka/security/gitlab/GitlabAuthenticationService.java @@ -4,13 +4,15 @@ import io.micronaut.http.HttpResponse; import jakarta.inject.Inject; import jakarta.inject.Singleton; +import java.util.List; +import java.util.Map; import lombok.extern.slf4j.Slf4j; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; -import java.util.List; -import java.util.Map; - +/** + * Gitlab authentication service. + */ @Slf4j @Singleton public class GitlabAuthenticationService { @@ -18,36 +20,39 @@ public class GitlabAuthenticationService { GitlabApiClient gitlabApiClient; /** - * Get all GitLab user groups + * Get all GitLab user groups. 
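getPageAndNext above recurses through GitLab's pagination headers: emit the current page, then concatenate the fetch of the page named by X-Next-Page until that header runs out. The shape of that recursion, modeled here with a synchronous stand-in for the API client so the sketch runs on its own:

```java
import java.util.List;
import java.util.function.IntFunction;
import reactor.core.publisher.Flux;

public class PaginationSketch {
    /** One page of results plus the index of the next page, or -1 when done. */
    record Page(List<String> items, int nextPage) {}

    /**
     * Emits the current page's items, then defers a recursive fetch of the
     * next page, mirroring the concatMap/concatWith chain in the service.
     */
    static Flux<String> fetchAll(IntFunction<Page> fetchPage, int page) {
        Page current = fetchPage.apply(page);
        Flux<String> items = Flux.fromIterable(current.items());
        return current.nextPage() < 0
            ? items
            : items.concatWith(Flux.defer(() -> fetchAll(fetchPage, current.nextPage())));
    }

    public static void main(String[] args) {
        // Stand-in for the GitLab API: three pages of group paths.
        List<Page> pages = List.of(
            new Page(List.of("group-a", "group-b"), 2),
            new Page(List.of("group-c"), 3),
            new Page(List.of("group-d"), -1));
        fetchAll(p -> pages.get(p - 1), 1).subscribe(System.out::println);
    }
}
```

Flux.defer matters here: without it the recursion would eagerly fetch every page at assembly time instead of on demand.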
+ * * @param token The user token * @return The user groups */ - public Flux findAllGroups(String token){ - return getPageAndNext(token,1) - .flatMap(response -> Flux.fromStream(response.body() - .stream() - .map(stringObjectMap -> stringObjectMap.get("full_path").toString()))); + public Flux findAllGroups(String token) { + return getPageAndNext(token, 1) + .flatMap(response -> Flux.fromStream(response.body() + .stream() + .map(stringObjectMap -> stringObjectMap.get("full_path").toString()))); } /** - * Get username of GitLab user + * Get username of GitLab user. + * * @param token The user token * @return The username */ public Mono findUsername(String token) { return gitlabApiClient.findUser(token) - .map(stringObjectMap -> stringObjectMap.get("email").toString()); + .map(stringObjectMap -> stringObjectMap.get("email").toString()); } /** - * Fetch all pages of GitLab user groups + * Fetch all pages of GitLab user groups. + * * @param token The user token - * @param page The current page to fetch + * @param page The current page to fetch * @return The user groups information */ private Flux>>> getPageAndNext(String token, int page) { return gitlabApiClient.getGroupsPage(token, page) - .concatMap(response -> { + .concatMap(response -> { log.debug("Call GitLab groups page {}/{}.", page, response.header("X-Total-Pages")); if (StringUtils.isEmpty(response.header("X-Next-Page"))) { @@ -55,7 +60,7 @@ private Flux>>> getPageAndNext(String toke } else { int nextPage = Integer.parseInt(response.header("X-Next-Page")); return Flux.just(response) - .concatWith(getPageAndNext(token, nextPage)); + .concatWith(getPageAndNext(token, nextPage)); } }); } diff --git a/src/main/java/com/michelin/ns4kafka/security/ldap/LdapAuthenticationMapper.java b/src/main/java/com/michelin/ns4kafka/security/ldap/LdapAuthenticationMapper.java index 45069df0..88de3d9e 100644 --- a/src/main/java/com/michelin/ns4kafka/security/ldap/LdapAuthenticationMapper.java +++ b/src/main/java/com/michelin/ns4kafka/security/ldap/LdapAuthenticationMapper.java @@ -11,11 +11,13 @@ import io.micronaut.security.ldap.configuration.LdapConfiguration; import jakarta.inject.Inject; import jakarta.inject.Singleton; - import java.util.List; import java.util.Map; import java.util.Set; +/** + * Custom LDAP authentication mapper. 
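The LocalUser changes further down also clarify how local credentials are checked: the configured password is the lowercase hex encoding of the SHA-256 digest of the cleartext. A sketch that produces such a hash, e.g. for seeding a local user in configuration:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class Sha256Hex {
    /** Hex-encodes the SHA-256 digest of the input, like LocalUser's password check. */
    static String sha256Hex(String input) throws NoSuchAlgorithmException {
        byte[] hash = MessageDigest.getInstance("SHA-256")
            .digest(input.getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder(2 * hash.length);
        for (byte b : hash) {
            // Mask to an unsigned value and left-pad single digits with '0'
            // so every byte contributes exactly two hex characters.
            String h = Integer.toHexString(0xff & b);
            if (h.length() == 1) {
                hex.append('0');
            }
            hex.append(h);
        }
        return hex.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // "admin" -> 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
        System.out.println(sha256Hex("admin"));
    }
}
```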
+ */ @Singleton @Replaces(DefaultContextAuthenticationMapper.class) @Requires(property = LdapConfiguration.PREFIX + ".enabled", notEquals = StringUtils.FALSE) @@ -25,6 +27,7 @@ public class LdapAuthenticationMapper implements ContextAuthenticationMapper { @Override public AuthenticationResponse map(ConvertibleValues attributes, String username, Set groups) { - return AuthenticationResponse.success(username, resourceBasedSecurityRule.computeRolesFromGroups(List.copyOf(groups)), Map.of("groups",groups)); + return AuthenticationResponse.success(username, + resourceBasedSecurityRule.computeRolesFromGroups(List.copyOf(groups)), Map.of("groups", groups)); } } diff --git a/src/main/java/com/michelin/ns4kafka/security/local/LocalUser.java b/src/main/java/com/michelin/ns4kafka/security/local/LocalUser.java index e828b0ce..07fa5275 100644 --- a/src/main/java/com/michelin/ns4kafka/security/local/LocalUser.java +++ b/src/main/java/com/michelin/ns4kafka/security/local/LocalUser.java @@ -1,37 +1,45 @@ package com.michelin.ns4kafka.security.local; import io.micronaut.core.annotation.Introspected; -import lombok.Builder; -import lombok.Getter; -import lombok.Setter; -import lombok.extern.slf4j.Slf4j; - import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.List; +import lombok.Builder; +import lombok.Getter; +import lombok.Setter; +import lombok.extern.slf4j.Slf4j; +/** + * Local user. + */ @Slf4j -@Introspected -@Builder @Getter @Setter +@Builder +@Introspected public class LocalUser { String username; String password; List groups; - public boolean isValidPassword(String input_password) { + /** + * Verify if the provided password is valid for this user. + * + * @param inputPassword The password to verify + * @return true if valid, false otherwise + */ + public boolean isValidPassword(String inputPassword) { log.debug("Verifying password for user {}", username); - MessageDigest digest = null; + MessageDigest digest; try { digest = MessageDigest.getInstance("SHA-256"); - byte[] encodedhash = digest.digest( - input_password.getBytes(StandardCharsets.UTF_8)); + byte[] encodedHash = digest.digest( + inputPassword.getBytes(StandardCharsets.UTF_8)); - StringBuilder hexString = new StringBuilder(2 * encodedhash.length); - for (int i = 0; i < encodedhash.length; i++) { - String hex = Integer.toHexString(0xff & encodedhash[i]); + StringBuilder hexString = new StringBuilder(2 * encodedHash.length); + for (byte hash : encodedHash) { + String hex = Integer.toHexString(0xff & hash); if (hex.length() == 1) { hexString.append('0'); } @@ -40,7 +48,6 @@ public boolean isValidPassword(String input_password) { log.debug("Provided password hash : {}", hexString); log.debug("Expected password hash : {}", password); return hexString.toString().equals(password); - } catch (NoSuchAlgorithmException e) { log.error("NoSuchAlgorithmException", e); return false; diff --git a/src/main/java/com/michelin/ns4kafka/security/local/LocalUserAuthenticationProvider.java b/src/main/java/com/michelin/ns4kafka/security/local/LocalUserAuthenticationProvider.java index 7d3b3c90..16d27be1 100644 --- a/src/main/java/com/michelin/ns4kafka/security/local/LocalUserAuthenticationProvider.java +++ b/src/main/java/com/michelin/ns4kafka/security/local/LocalUserAuthenticationProvider.java @@ -1,47 +1,56 @@ package com.michelin.ns4kafka.security.local; -import com.michelin.ns4kafka.config.SecurityConfig; +import com.michelin.ns4kafka.properties.SecurityProperties; import 
com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import io.micronaut.core.annotation.Nullable; import io.micronaut.http.HttpRequest; -import io.micronaut.security.authentication.*; +import io.micronaut.security.authentication.AuthenticationException; +import io.micronaut.security.authentication.AuthenticationFailed; +import io.micronaut.security.authentication.AuthenticationFailureReason; +import io.micronaut.security.authentication.AuthenticationProvider; +import io.micronaut.security.authentication.AuthenticationRequest; +import io.micronaut.security.authentication.AuthenticationResponse; import jakarta.inject.Inject; import jakarta.inject.Singleton; +import java.util.Map; +import java.util.Optional; import lombok.extern.slf4j.Slf4j; import org.reactivestreams.Publisher; import reactor.core.publisher.Mono; -import java.util.Map; -import java.util.Optional; - +/** + * Local user authentication provider. + */ @Slf4j @Singleton public class LocalUserAuthenticationProvider implements AuthenticationProvider> { @Inject - SecurityConfig securityConfig; + SecurityProperties securityProperties; @Inject ResourceBasedSecurityRule resourceBasedSecurityRule; @Override - public Publisher authenticate(@Nullable HttpRequest httpRequest, AuthenticationRequest authenticationRequest) { + public Publisher authenticate(@Nullable HttpRequest httpRequest, + AuthenticationRequest authenticationRequest) { return Mono.create(emitter -> { String username = authenticationRequest.getIdentity().toString(); String password = authenticationRequest.getSecret().toString(); log.debug("Checking local authentication for user: {}", username); - Optional authenticatedUser = securityConfig.getLocalUsers().stream() - .filter(localUser -> localUser.getUsername().equals(username)) - .filter(localUser -> localUser.isValidPassword(password)) - .findFirst(); + Optional authenticatedUser = securityProperties.getLocalUsers().stream() + .filter(localUser -> localUser.getUsername().equals(username)) + .filter(localUser -> localUser.isValidPassword(password)) + .findFirst(); if (authenticatedUser.isPresent()) { AuthenticationResponse user = AuthenticationResponse.success(username, - resourceBasedSecurityRule.computeRolesFromGroups(authenticatedUser.get().getGroups()), - Map.of("groups", authenticatedUser.get().getGroups())); + resourceBasedSecurityRule.computeRolesFromGroups(authenticatedUser.get().getGroups()), + Map.of("groups", authenticatedUser.get().getGroups())); emitter.success(user); } else { - emitter.error(new AuthenticationException(new AuthenticationFailed(AuthenticationFailureReason.CREDENTIALS_DO_NOT_MATCH))); + emitter.error(new AuthenticationException( + new AuthenticationFailed(AuthenticationFailureReason.CREDENTIALS_DO_NOT_MATCH))); } }); } diff --git a/src/main/java/com/michelin/ns4kafka/services/AccessControlEntryService.java b/src/main/java/com/michelin/ns4kafka/services/AccessControlEntryService.java index d7534253..6f196c93 100644 --- a/src/main/java/com/michelin/ns4kafka/services/AccessControlEntryService.java +++ b/src/main/java/com/michelin/ns4kafka/services/AccessControlEntryService.java @@ -8,12 +8,14 @@ import io.micronaut.inject.qualifiers.Qualifiers; import jakarta.inject.Inject; import jakarta.inject.Singleton; - import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; +/** + * Access control entry service. 
+ */ @Singleton public class AccessControlEntryService { public static final String PUBLIC_GRANTED_TO = "*"; @@ -25,7 +27,8 @@ public class AccessControlEntryService { ApplicationContext applicationContext; /** - * Validate a new ACL + * Validate a new ACL. + * * @param accessControlEntry The ACL * @param namespace The namespace * @return A list of validation errors @@ -35,63 +38,66 @@ public List validate(AccessControlEntry accessControlEntry, Namespace na // Which resource can be granted cross namespaces List allowedResourceTypes = - List.of(AccessControlEntry.ResourceType.TOPIC, AccessControlEntry.ResourceType.CONNECT_CLUSTER); + List.of(AccessControlEntry.ResourceType.TOPIC, AccessControlEntry.ResourceType.CONNECT_CLUSTER); // Which permission can be granted cross namespaces ? READ, WRITE // Only admin can grant OWNER List allowedPermissions = - List.of(AccessControlEntry.Permission.READ, - AccessControlEntry.Permission.WRITE); + List.of(AccessControlEntry.Permission.READ, + AccessControlEntry.Permission.WRITE); // Which patternTypes can be granted List allowedPatternTypes = - List.of(AccessControlEntry.ResourcePatternType.LITERAL, - AccessControlEntry.ResourcePatternType.PREFIXED); + List.of(AccessControlEntry.ResourcePatternType.LITERAL, + AccessControlEntry.ResourcePatternType.PREFIXED); if (!allowedResourceTypes.contains(accessControlEntry.getSpec().getResourceType())) { - validationErrors.add("Invalid value " + accessControlEntry.getSpec().getResourceType() + - " for resourceType: Value must be one of [" + - allowedResourceTypes.stream().map(Object::toString).collect(Collectors.joining(", ")) + - "]"); + validationErrors.add("Invalid value " + accessControlEntry.getSpec().getResourceType() + + " for resourceType: Value must be one of [" + + allowedResourceTypes.stream().map(Object::toString).collect(Collectors.joining(", ")) + + "]"); } if (!allowedPermissions.contains(accessControlEntry.getSpec().getPermission())) { - validationErrors.add("Invalid value " + accessControlEntry.getSpec().getPermission() + - " for permission: Value must be one of [" + - allowedPermissions.stream().map(Object::toString).collect(Collectors.joining(", ")) + - "]"); + validationErrors.add("Invalid value " + accessControlEntry.getSpec().getPermission() + + " for permission: Value must be one of [" + + allowedPermissions.stream().map(Object::toString).collect(Collectors.joining(", ")) + + "]"); } if (!allowedPatternTypes.contains(accessControlEntry.getSpec().getResourcePatternType())) { - validationErrors.add("Invalid value " + accessControlEntry.getSpec().getResourcePatternType() + - " for patternType: Value must be one of [" + - allowedPatternTypes.stream().map(Object::toString).collect(Collectors.joining(", ")) + - "]"); + validationErrors.add("Invalid value " + accessControlEntry.getSpec().getResourcePatternType() + + " for patternType: Value must be one of [" + + allowedPatternTypes.stream().map(Object::toString).collect(Collectors.joining(", ")) + + "]"); } // GrantedTo Namespace exists ? 
NamespaceService namespaceService = applicationContext.getBean(NamespaceService.class); - Optional grantedToNamespace = namespaceService.findByName(accessControlEntry.getSpec().getGrantedTo()); + Optional grantedToNamespace = + namespaceService.findByName(accessControlEntry.getSpec().getGrantedTo()); if (grantedToNamespace.isEmpty() && !accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO)) { - validationErrors.add("Invalid value " + accessControlEntry.getSpec().getGrantedTo() + " for grantedTo: Namespace doesn't exist"); + validationErrors.add("Invalid value " + accessControlEntry.getSpec().getGrantedTo() + + " for grantedTo: Namespace doesn't exist"); } // Are you dumb ? if (namespace.getMetadata().getName().equals(accessControlEntry.getSpec().getGrantedTo())) { - validationErrors.add("Invalid value " + accessControlEntry.getSpec().getGrantedTo() + " for grantedTo: Why would you grant to yourself ?!"); + validationErrors.add("Invalid value " + accessControlEntry.getSpec().getGrantedTo() + + " for grantedTo: Why would you grant to yourself ?!"); } if (!isOwnerOfTopLevelAcl(accessControlEntry, namespace)) { - validationErrors.add("Invalid grant " + accessControlEntry.getSpec().getResourcePatternType() + ":" + - accessControlEntry.getSpec().getResource() + - " : Namespace is neither OWNER of LITERAL: resource nor top-level PREFIXED:resource"); + validationErrors.add("Invalid grant " + accessControlEntry.getSpec().getResourcePatternType() + ":" + + accessControlEntry.getSpec().getResource() + + " : Namespace is neither OWNER of LITERAL: resource nor top-level PREFIXED:resource"); } return validationErrors; } /** - * Validate a new ACL created by an admin + * Validate a new ACL created by an admin. * * @param accessControlEntry The ACL * @param namespace The namespace @@ -115,35 +121,36 @@ public List validateAsAdmin(AccessControlEntry accessControlEntry, Names // namespace2 OWNER:PREFIXED:project2 OK 8 // namespace2 OWNER:LITERAL:proj OK 9 return findAllForCluster(namespace.getMetadata().getCluster()) - .stream() - // don't include the ACL if it's itself (namespace+name) - .filter(ace -> !ace.getMetadata().getNamespace().equals(namespace.getMetadata().getName()) || - !ace.getMetadata().getName().equals(accessControlEntry.getMetadata().getName())) - .filter(ace -> ace.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) - .filter(ace -> ace.getSpec().getResourceType() == accessControlEntry.getSpec().getResourceType()) - .filter(ace -> { - // new PREFIXED ACL would cover existing ACLs - boolean parentOverlap = false; - if (accessControlEntry.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.PREFIXED) { - parentOverlap = ace.getSpec().getResource().startsWith(accessControlEntry.getSpec().getResource()); - } - // new ACL would be covered by a PREFIXED existing ACLs - boolean childOverlap = false; - if (ace.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.PREFIXED) { - childOverlap = accessControlEntry.getSpec().getResource().startsWith(ace.getSpec().getResource()); - } + .stream() + // don't include the ACL if it's itself (namespace+name) + .filter(ace -> !ace.getMetadata().getNamespace().equals(namespace.getMetadata().getName()) + || !ace.getMetadata().getName().equals(accessControlEntry.getMetadata().getName())) + .filter(ace -> ace.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) + .filter(ace -> ace.getSpec().getResourceType() == accessControlEntry.getSpec().getResourceType()) + .filter(ace 
-> { + // new PREFIXED ACL would cover existing ACLs + boolean parentOverlap = false; + if (accessControlEntry.getSpec().getResourcePatternType() + == AccessControlEntry.ResourcePatternType.PREFIXED) { + parentOverlap = ace.getSpec().getResource().startsWith(accessControlEntry.getSpec().getResource()); + } + // new ACL would be covered by a PREFIXED existing ACLs + boolean childOverlap = false; + if (ace.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.PREFIXED) { + childOverlap = accessControlEntry.getSpec().getResource().startsWith(ace.getSpec().getResource()); + } - boolean same = accessControlEntry.getSpec().getResource().equals(ace.getSpec().getResource()); + boolean same = accessControlEntry.getSpec().getResource().equals(ace.getSpec().getResource()); - return same || parentOverlap || childOverlap; + return same || parentOverlap || childOverlap; - }) - .map(ace -> String.format("AccessControlEntry overlaps with existing one: %s", ace)) - .toList(); + }) + .map(ace -> String.format("AccessControlEntry overlaps with existing one: %s", ace)) + .toList(); } /** - * Is namespace owner of given ACL + * Is namespace owner of given ACL. * * @param accessControlEntry The ACL * @param namespace The namespace @@ -152,39 +159,40 @@ public List validateAsAdmin(AccessControlEntry accessControlEntry, Names public boolean isOwnerOfTopLevelAcl(AccessControlEntry accessControlEntry, Namespace namespace) { // Grantor Namespace is OWNER of Resource + ResourcePattern ? return findAllGrantedToNamespace(namespace).stream() - .filter(ace -> ace.getSpec().getResourceType() == accessControlEntry.getSpec().getResourceType() && - ace.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) - .anyMatch(ace -> { - // if grantor is owner of PREFIXED resource that starts with - // owner PREFIXED: priv_bsm_ - // grants LITERAL : priv_bsm_topic OK - // grants PREFIXED: priv_bsm_topic OK - // grants PREFIXED: priv_b NO - // grants LITERAL : priv_b NO - // grants PREFIXED: priv_bsm_ OK - // grants LITERAL : pric_bsm_ OK - if (ace.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.PREFIXED && - accessControlEntry.getSpec().getResource().startsWith(ace.getSpec().getResource())) { - // if so, either patternType are fine (LITERAL/PREFIXED) - return true; - } - // if grantor is owner of LITERAL resource : - // exact match to LITERAL grant - // owner LITERAL : priv_bsm_topic - // grants LITERAL : priv_bsm_topic OK - // grants PREFIXED: priv_bsm_topic NO - // grants PREFIXED: priv_bs NO - // grants LITERAL : priv_b NO - // grants PREFIXED: priv_bsm_topic2 NO - // grants LITERAL : pric_bsm_topic2 NO - return ace.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.LITERAL && - accessControlEntry.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.LITERAL && - accessControlEntry.getSpec().getResource().equals(ace.getSpec().getResource()); - }); + .filter(ace -> ace.getSpec().getResourceType() == accessControlEntry.getSpec().getResourceType() + && ace.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) + .anyMatch(ace -> { + // if grantor is owner of PREFIXED resource that starts with + // owner PREFIXED: priv_bsm_ + // grants LITERAL : priv_bsm_topic OK + // grants PREFIXED: priv_bsm_topic OK + // grants PREFIXED: priv_b NO + // grants LITERAL : priv_b NO + // grants PREFIXED: priv_bsm_ OK + // grants LITERAL : pric_bsm_ OK + if (ace.getSpec().getResourcePatternType() == 
AccessControlEntry.ResourcePatternType.PREFIXED + && accessControlEntry.getSpec().getResource().startsWith(ace.getSpec().getResource())) { + // if so, either patternType are fine (LITERAL/PREFIXED) + return true; + } + // if grantor is owner of LITERAL resource : + // exact match to LITERAL grant + // owner LITERAL : priv_bsm_topic + // grants LITERAL : priv_bsm_topic OK + // grants PREFIXED: priv_bsm_topic NO + // grants PREFIXED: priv_bs NO + // grants LITERAL : priv_b NO + // grants PREFIXED: priv_bsm_topic2 NO + // grants LITERAL : pric_bsm_topic2 NO + return ace.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.LITERAL + && accessControlEntry.getSpec().getResourcePatternType() + == AccessControlEntry.ResourcePatternType.LITERAL + && accessControlEntry.getSpec().getResource().equals(ace.getSpec().getResource()); + }); } /** - * Create an ACL in internal topic + * Create an ACL in internal topic. * * @param accessControlEntry The ACL * @return The created ACL @@ -194,72 +202,76 @@ public AccessControlEntry create(AccessControlEntry accessControlEntry) { } /** - * Delete an ACL from broker and from internal topic + * Delete an ACL from broker and from internal topic. * - * @param namespace The namespace + * @param namespace The namespace * @param accessControlEntry The ACL */ public void delete(Namespace namespace, AccessControlEntry accessControlEntry) { - AccessControlEntryAsyncExecutor accessControlEntryAsyncExecutor = applicationContext.getBean(AccessControlEntryAsyncExecutor.class, + AccessControlEntryAsyncExecutor accessControlEntryAsyncExecutor = + applicationContext.getBean(AccessControlEntryAsyncExecutor.class, Qualifiers.byName(accessControlEntry.getMetadata().getCluster())); - accessControlEntryAsyncExecutor.deleteNs4KafkaACL(namespace, accessControlEntry); + accessControlEntryAsyncExecutor.deleteNs4KafkaAcl(namespace, accessControlEntry); accessControlEntryRepository.delete(accessControlEntry); } /** - * Find all ACLs granted to given namespace - * Will also return public granted ACLs + * Find all ACLs granted to given namespace. + * Will also return public granted ACLs. * * @param namespace The namespace * @return A list of ACLs */ public List findAllGrantedToNamespace(Namespace namespace) { return accessControlEntryRepository.findAll() - .stream() - .filter(accessControlEntry -> accessControlEntry.getSpec().getGrantedTo().equals(namespace.getMetadata().getName()) || - accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO)) - .toList(); + .stream() + .filter(accessControlEntry -> + accessControlEntry.getSpec().getGrantedTo().equals(namespace.getMetadata().getName()) + || accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO)) + .toList(); } /** - * Find all public granted ACLs + * Find all public granted ACLs. * * @return A list of ACLs */ public List findAllPublicGrantedTo() { return accessControlEntryRepository.findAll() - .stream() - .filter(accessControlEntry -> accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO)) - .toList(); + .stream() + .filter(accessControlEntry -> accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO)) + .toList(); } /** - * Find all ACLs of given namespace + * Find all ACLs of given namespace. 
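The overlap rules encoded in validateAsAdmin's filter chain are easier to see in isolation: a candidate grant collides with an existing OWNER grant when either one, taken as a prefix, covers the other, or when both name the same resource. A stripped-down sketch with illustrative types:

```java
public class AclOverlap {
    enum PatternType { LITERAL, PREFIXED }

    record Acl(PatternType patternType, String resource) {}

    /**
     * True when the candidate and an existing OWNER grant cover
     * overlapping resource names.
     */
    static boolean overlaps(Acl candidate, Acl existing) {
        // A new PREFIXED grant would cover the existing resource...
        boolean parentOverlap = candidate.patternType() == PatternType.PREFIXED
            && existing.resource().startsWith(candidate.resource());
        // ...or the new resource falls under an existing PREFIXED grant.
        boolean childOverlap = existing.patternType() == PatternType.PREFIXED
            && candidate.resource().startsWith(existing.resource());
        return parentOverlap || childOverlap
            || candidate.resource().equals(existing.resource());
    }

    public static void main(String[] args) {
        Acl owned = new Acl(PatternType.PREFIXED, "project1");
        System.out.println(overlaps(new Acl(PatternType.LITERAL, "project1_topic"), owned)); // true
        System.out.println(overlaps(new Acl(PatternType.PREFIXED, "project2"), owned));      // false
    }
}
```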
* * @param namespace The namespace * @return A list of ACLs */ public List findAllForNamespace(Namespace namespace) { return accessControlEntryRepository.findAll().stream() - .filter(accessControlEntry -> accessControlEntry.getMetadata().getNamespace().equals(namespace.getMetadata().getName())) - .toList(); + .filter(accessControlEntry -> accessControlEntry.getMetadata().getNamespace() + .equals(namespace.getMetadata().getName())) + .toList(); } /** - * Find all ACLs of given cluster + * Find all ACLs of given cluster. * * @param cluster The cluster * @return A list of ACLs */ public List findAllForCluster(String cluster) { return accessControlEntryRepository.findAll().stream() - .filter(accessControlEntry -> accessControlEntry.getMetadata().getCluster().equals(cluster)) - .toList(); + .filter(accessControlEntry -> accessControlEntry.getMetadata().getCluster().equals(cluster)) + .toList(); } /** - * Find all the ACLs on all clusters + * Find all the ACLs on all clusters. + * * @return A list of ACLs */ public List findAll() { @@ -267,32 +279,29 @@ public List findAll() { } /** - * Does given namespace is owner of the given resource ? + * Is namespace owner of the given resource. * * @param namespace The namespace * @param resourceType The resource type to filter * @param resource The resource name * @return true if it is, false otherwise */ - public boolean isNamespaceOwnerOfResource(String namespace, AccessControlEntry.ResourceType resourceType, String resource) { + public boolean isNamespaceOwnerOfResource(String namespace, AccessControlEntry.ResourceType resourceType, + String resource) { return accessControlEntryRepository.findAll() - .stream() - .filter(accessControlEntry -> accessControlEntry.getSpec().getGrantedTo().equals(namespace)) - .filter(accessControlEntry -> accessControlEntry.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) - .filter(accessControlEntry -> accessControlEntry.getSpec().getResourceType() == resourceType) - .anyMatch(accessControlEntry -> { - switch (accessControlEntry.getSpec().getResourcePatternType()) { - case PREFIXED: - return resource.startsWith(accessControlEntry.getSpec().getResource()); - case LITERAL: - return resource.equals(accessControlEntry.getSpec().getResource()); - } - return false; - }); + .stream() + .filter(accessControlEntry -> accessControlEntry.getSpec().getGrantedTo().equals(namespace)) + .filter(accessControlEntry -> accessControlEntry.getSpec().getPermission() + == AccessControlEntry.Permission.OWNER) + .filter(accessControlEntry -> accessControlEntry.getSpec().getResourceType() == resourceType) + .anyMatch(accessControlEntry -> switch (accessControlEntry.getSpec().getResourcePatternType()) { + case PREFIXED -> resource.startsWith(accessControlEntry.getSpec().getResource()); + case LITERAL -> resource.equals(accessControlEntry.getSpec().getResource()); + }); } /** - * Find an ACL by name + * Find an ACL by name. 
* * @param namespace The namespace * @param name The ACL name diff --git a/src/main/java/com/michelin/ns4kafka/services/ConnectClusterService.java b/src/main/java/com/michelin/ns4kafka/services/ConnectClusterService.java index 9fdb2565..6a09e0fb 100644 --- a/src/main/java/com/michelin/ns4kafka/services/ConnectClusterService.java +++ b/src/main/java/com/michelin/ns4kafka/services/ConnectClusterService.java @@ -1,12 +1,12 @@ package com.michelin.ns4kafka.services; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.config.SecurityConfig; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.connect.cluster.ConnectCluster; import com.michelin.ns4kafka.models.connect.cluster.VaultResponse; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; +import com.michelin.ns4kafka.properties.SecurityProperties; import com.michelin.ns4kafka.repositories.ConnectClusterRepository; import com.michelin.ns4kafka.services.clients.connect.KafkaConnectClient; import com.michelin.ns4kafka.services.clients.connect.entities.ServerInfo; @@ -19,10 +19,6 @@ import io.micronaut.http.client.exceptions.HttpClientException; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - import java.net.MalformedURLException; import java.net.URL; import java.util.ArrayList; @@ -30,7 +26,13 @@ import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.Stream; +import lombok.extern.slf4j.Slf4j; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +/** + * Service to manage Kafka Connect clusters. + */ @Slf4j @Singleton public class ConnectClusterService { @@ -51,17 +53,18 @@ public class ConnectClusterService { ConnectClusterRepository connectClusterRepository; @Inject - List kafkaAsyncExecutorConfig; + List managedClusterProperties; @Inject - SecurityConfig securityConfig; + SecurityProperties securityProperties; @Inject @Client HttpClient httpClient; /** - * Find all self deployed Connect clusters + * Find all self deployed Connect clusters. 
+ * * @param all Include hard-declared Connect clusters * @return A list of Connect clusters */ @@ -69,126 +72,130 @@ public Flux findAll(boolean all) { List results = connectClusterRepository.findAll(); if (all) { - results.addAll(kafkaAsyncExecutorConfig + results.addAll(managedClusterProperties + .stream() + .map(config -> config.getConnects().entrySet() .stream() - .map(config -> config.getConnects().entrySet() - .stream() - .map(entry -> - ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name(entry.getKey()) - .cluster(config.getName()) - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url(entry.getValue().getUrl()) - .username(entry.getValue().getBasicAuthUsername()) - .password(entry.getValue().getBasicAuthPassword()) - .build()) - .build()) - .toList()) - .flatMap(List::stream) - .toList()); + .map(entry -> + ConnectCluster.builder() + .metadata(ObjectMeta.builder() + .name(entry.getKey()) + .cluster(config.getName()) + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url(entry.getValue().getUrl()) + .username(entry.getValue().getBasicAuthUsername()) + .password(entry.getValue().getBasicAuthPassword()) + .build()) + .build()) + .toList()) + .flatMap(List::stream) + .toList()); } return Flux.fromIterable(results) - .flatMap(connectCluster -> kafkaConnectClient.version(connectCluster.getMetadata().getCluster(), - connectCluster.getMetadata().getName()) - .doOnError(error -> { - connectCluster.getSpec().setStatus(ConnectCluster.Status.IDLE); - connectCluster.getSpec().setStatusMessage(error.getMessage()); - }) - .doOnSuccess(response -> { - connectCluster.getSpec().setStatus(ConnectCluster.Status.HEALTHY); - connectCluster.getSpec().setStatusMessage(null); - }) - .map(response -> connectCluster) - .onErrorReturn(connectCluster)); + .flatMap(connectCluster -> kafkaConnectClient.version(connectCluster.getMetadata().getCluster(), + connectCluster.getMetadata().getName()) + .doOnError(error -> { + connectCluster.getSpec().setStatus(ConnectCluster.Status.IDLE); + connectCluster.getSpec().setStatusMessage(error.getMessage()); + }) + .doOnSuccess(response -> { + connectCluster.getSpec().setStatus(ConnectCluster.Status.HEALTHY); + connectCluster.getSpec().setStatusMessage(null); + }) + .map(response -> connectCluster) + .onErrorReturn(connectCluster)); } /** - * Find all self deployed Connect clusters for a given namespace with a given list of permissions + * Find all self deployed Connect clusters for a given namespace with a given list of permissions. 
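findAll above enriches each Connect cluster with a health status by probing its version endpoint, recording failures on the object itself and then falling back to it, so one unreachable worker never fails the whole listing. A self-contained model of that doOnError/doOnSuccess/onErrorReturn pattern, with a stand-in for the Connect client (names and the version string are illustrative):

```java
import java.time.Duration;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class HealthEnrichment {
    enum Status { HEALTHY, IDLE }

    static class Cluster {
        final String name;
        Status status;
        String statusMessage;
        Cluster(String name) { this.name = name; }
    }

    /** Stand-in for kafkaConnectClient.version(...): fails for "broken". */
    static Mono<String> version(Cluster cluster) {
        return "broken".equals(cluster.name)
            ? Mono.error(new IllegalStateException("Connection refused"))
            : Mono.just("3.5.0").delayElement(Duration.ofMillis(10));
    }

    public static void main(String[] args) {
        Flux.just(new Cluster("main"), new Cluster("broken"))
            // Probe each cluster; annotate the outcome on the object and
            // return the annotated cluster even when the probe errors.
            .flatMap(cluster -> version(cluster)
                .doOnError(error -> {
                    cluster.status = Status.IDLE;
                    cluster.statusMessage = error.getMessage();
                })
                .doOnSuccess(v -> cluster.status = Status.HEALTHY)
                .map(v -> cluster)
                .onErrorReturn(cluster))
            .doOnNext(c -> System.out.println(c.name + " -> " + c.status))
            .blockLast();
    }
}
```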
* * @param namespace The namespace * @param permissions The list of permission to filter on * @return A list of Connect clusters */ - public List findAllByNamespace(Namespace namespace, List permissions) { + public List findAllByNamespace(Namespace namespace, + List permissions) { List acls = accessControlEntryService.findAllGrantedToNamespace(namespace).stream() - .filter(acl -> permissions.contains(acl.getSpec().getPermission())) - .filter(acl -> acl.getSpec().getResourceType() == AccessControlEntry.ResourceType.CONNECT_CLUSTER).toList(); + .filter(acl -> permissions.contains(acl.getSpec().getPermission())) + .filter(acl -> acl.getSpec().getResourceType() == AccessControlEntry.ResourceType.CONNECT_CLUSTER).toList(); return connectClusterRepository.findAllForCluster(namespace.getMetadata().getCluster()) - .stream() - .filter(connector -> acls.stream().anyMatch(accessControlEntry -> - switch (accessControlEntry.getSpec().getResourcePatternType()) { - case PREFIXED -> - connector.getMetadata().getName().startsWith(accessControlEntry.getSpec().getResource()); - case LITERAL -> - connector.getMetadata().getName().equals(accessControlEntry.getSpec().getResource()); - })) - .toList(); + .stream() + .filter(connector -> acls.stream().anyMatch(accessControlEntry -> switch (accessControlEntry.getSpec() + .getResourcePatternType()) { + case PREFIXED -> connector.getMetadata().getName() + .startsWith(accessControlEntry.getSpec().getResource()); + case LITERAL -> connector.getMetadata().getName().equals(accessControlEntry.getSpec().getResource()); + })) + .toList(); } /** - * Find all self deployed Connect clusters whose namespace is owner + * Find all self deployed Connect clusters whose namespace is owner. * * @param namespace The namespace * @return The list of owned Connect cluster */ public List findAllByNamespaceOwner(Namespace namespace) { return findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.OWNER)) - .stream() - .map(connectCluster -> { - var builder = ConnectCluster.ConnectClusterSpec.builder() - .url(connectCluster.getSpec().getUrl()) - .username(connectCluster.getSpec().getUsername()) - .password(EncryptionUtils.decryptAES256GCM(connectCluster.getSpec().getPassword(), securityConfig.getAes256EncryptionKey())) - .aes256Key(EncryptionUtils.decryptAES256GCM(connectCluster.getSpec().getAes256Key(), securityConfig.getAes256EncryptionKey())) - .aes256Salt(EncryptionUtils.decryptAES256GCM(connectCluster.getSpec().getAes256Salt(), securityConfig.getAes256EncryptionKey())) - .aes256Format(connectCluster.getSpec().getAes256Format()); - - try { - kafkaConnectClient.version(connectCluster.getMetadata().getCluster(), connectCluster.getMetadata().getName()).block(); - builder.status(ConnectCluster.Status.HEALTHY); - } catch (HttpClientException e) { - builder.status(ConnectCluster.Status.IDLE); - builder.statusMessage(e.getMessage()); - } - - return ConnectCluster.builder() - .metadata(connectCluster.getMetadata()) - .spec(builder.build()) - .build(); - }) - .toList(); + .stream() + .map(connectCluster -> { + var builder = ConnectCluster.ConnectClusterSpec.builder() + .url(connectCluster.getSpec().getUrl()) + .username(connectCluster.getSpec().getUsername()) + .password(EncryptionUtils.decryptAes256Gcm(connectCluster.getSpec().getPassword(), + securityProperties.getAes256EncryptionKey())) + .aes256Key(EncryptionUtils.decryptAes256Gcm(connectCluster.getSpec().getAes256Key(), + securityProperties.getAes256EncryptionKey())) + 
.aes256Salt(EncryptionUtils.decryptAes256Gcm(connectCluster.getSpec().getAes256Salt(), + securityProperties.getAes256EncryptionKey())) + .aes256Format(connectCluster.getSpec().getAes256Format()); + + try { + kafkaConnectClient.version(connectCluster.getMetadata().getCluster(), + connectCluster.getMetadata().getName()).block(); + builder.status(ConnectCluster.Status.HEALTHY); + } catch (HttpClientException e) { + builder.status(ConnectCluster.Status.IDLE); + builder.statusMessage(e.getMessage()); + } + + return ConnectCluster.builder() + .metadata(connectCluster.getMetadata()) + .spec(builder.build()) + .build(); + }) + .toList(); } /** - * Find all self deployed Connect clusters whose namespace has write access + * Find all self deployed Connect clusters whose namespace has write access. * * @param namespace The namespace * @return The list of Connect cluster with write access */ public List findAllByNamespaceWrite(Namespace namespace) { return Stream.concat( - this.findAllByNamespaceOwner(namespace).stream(), - this.findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.WRITE)).stream() - .map(connectCluster -> ConnectCluster.builder() - .metadata(connectCluster.getMetadata()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url(connectCluster.getSpec().getUrl()) - .username(connectCluster.getSpec().getUsername()) - .password(WILDCARD_SECRET) - .aes256Key(WILDCARD_SECRET) - .aes256Salt(WILDCARD_SECRET) - .aes256Format(connectCluster.getSpec().getAes256Format()) - .build()) - .build()) + this.findAllByNamespaceOwner(namespace).stream(), + this.findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.WRITE)).stream() + .map(connectCluster -> ConnectCluster.builder() + .metadata(connectCluster.getMetadata()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url(connectCluster.getSpec().getUrl()) + .username(connectCluster.getSpec().getUsername()) + .password(WILDCARD_SECRET) + .aes256Key(WILDCARD_SECRET) + .aes256Salt(WILDCARD_SECRET) + .aes256Format(connectCluster.getSpec().getAes256Format()) + .build()) + .build()) ).toList(); } /** - * Find a self deployed Connect cluster by namespace and name with owner rights + * Find a self deployed Connect cluster by namespace and name with owner rights. * * @param namespace The namespace * @param connectClusterName The connect worker name @@ -196,13 +203,13 @@ public List findAllByNamespaceWrite(Namespace namespace) { */ public Optional findByNamespaceAndNameOwner(Namespace namespace, String connectClusterName) { return findAllByNamespaceOwner(namespace) - .stream() - .filter(connectCluster -> connectCluster.getMetadata().getName().equals(connectClusterName)) - .findFirst(); + .stream() + .filter(connectCluster -> connectCluster.getMetadata().getName().equals(connectClusterName)) + .findFirst(); } /** - * Create a given connect worker + * Create a given connect worker. 
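+ * <p>Illustrative sketch (the names and URL are hypothetical); sensitive specs such as the password
+ * are persisted AES-256-GCM encrypted by this method, never in clear text:
+ * <pre>{@code
+ * ConnectCluster created = connectClusterService.create(ConnectCluster.builder()
+ *     .metadata(ObjectMeta.builder().name("my-connect").cluster("my-kafka").build())
+ *     .spec(ConnectCluster.ConnectClusterSpec.builder()
+ *         .url("https://connect.example.org")
+ *         .username("user")
+ *         .password("secret") // encrypted before being stored
+ *         .build())
+ *     .build());
+ * }</pre>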
* * @param connectCluster The connect worker * @return The created connect worker @@ -210,26 +217,29 @@ public Optional findByNamespaceAndNameOwner(Namespace namespace, public ConnectCluster create(ConnectCluster connectCluster) { if (StringUtils.hasText(connectCluster.getSpec().getPassword())) { connectCluster.getSpec() - .setPassword(EncryptionUtils.encryptAES256GCM(connectCluster.getSpec().getPassword(), securityConfig.getAes256EncryptionKey())); + .setPassword(EncryptionUtils.encryptAes256Gcm(connectCluster.getSpec().getPassword(), + securityProperties.getAes256EncryptionKey())); } // encrypt aes256 key if present if (StringUtils.hasText(connectCluster.getSpec().getAes256Key())) { connectCluster.getSpec() - .setAes256Key(EncryptionUtils.encryptAES256GCM(connectCluster.getSpec().getAes256Key(), securityConfig.getAes256EncryptionKey())); + .setAes256Key(EncryptionUtils.encryptAes256Gcm(connectCluster.getSpec().getAes256Key(), + securityProperties.getAes256EncryptionKey())); } // encrypt aes256 salt if present if (StringUtils.hasText(connectCluster.getSpec().getAes256Salt())) { connectCluster.getSpec() - .setAes256Salt(EncryptionUtils.encryptAES256GCM(connectCluster.getSpec().getAes256Salt(), securityConfig.getAes256EncryptionKey())); + .setAes256Salt(EncryptionUtils.encryptAes256Gcm(connectCluster.getSpec().getAes256Salt(), + securityProperties.getAes256EncryptionKey())); } return connectClusterRepository.create(connectCluster); } /** - * Validate the given connect worker configuration for creation + * Validate the given connect worker configuration for creation. * * @param connectCluster The connect worker to validate * @return A list of validation errors @@ -237,36 +247,48 @@ public ConnectCluster create(ConnectCluster connectCluster) { public Mono> validateConnectClusterCreation(ConnectCluster connectCluster) { List errors = new ArrayList<>(); - if (kafkaAsyncExecutorConfig.stream().anyMatch(cluster -> - cluster.getConnects().entrySet().stream().anyMatch(entry -> entry.getKey().equals(connectCluster.getMetadata().getName())))) { - errors.add(String.format("A Kafka Connect is already defined globally with the name \"%s\". Please provide a different name.", connectCluster.getMetadata().getName())); + if (managedClusterProperties.stream().anyMatch(cluster -> + cluster.getConnects().entrySet().stream() + .anyMatch(entry -> entry.getKey().equals(connectCluster.getMetadata().getName())))) { + errors.add(String.format( + "A Kafka Connect is already defined globally with the name \"%s\". 
Please provide a different name.", + connectCluster.getMetadata().getName())); } try { - MutableHttpRequest request = HttpRequest.GET(new URL(StringUtils.prependUri(connectCluster.getSpec().getUrl(), "/connectors?expand=info&expand=status")).toString()) - .basicAuth(connectCluster.getSpec().getUsername(), connectCluster.getSpec().getPassword()); + MutableHttpRequest request = HttpRequest.GET(new URL( + StringUtils.prependUri(connectCluster.getSpec().getUrl(), + "/connectors?expand=info&expand=status")).toString()) + .basicAuth(connectCluster.getSpec().getUsername(), connectCluster.getSpec().getPassword()); Mono httpResponse = Mono.from(httpClient.retrieve(request, ServerInfo.class)); return httpResponse - .doOnError(error -> errors.add(String.format("The Kafka Connect \"%s\" is not healthy (%s).", connectCluster.getMetadata().getName(), error.getMessage()))) - .doOnEach(signal -> { - // If the key or salt is defined, but one of them is missing - if ((signal.isOnError() || signal.isOnNext()) && (StringUtils.hasText(connectCluster.getSpec().getAes256Key()) ^ StringUtils.hasText(connectCluster.getSpec().getAes256Salt()))) { - errors.add(String.format("The Connect cluster \"%s\" \"aes256Key\" and \"aes256Salt\" specs are required to activate the encryption.", connectCluster.getMetadata().getName())); - - } - }) - .map(response -> errors) - .onErrorReturn(errors); + .doOnError(error -> errors.add(String.format("The Kafka Connect \"%s\" is not healthy (%s).", + connectCluster.getMetadata().getName(), error.getMessage()))) + .doOnEach(signal -> { + // If the key or salt is defined, but one of them is missing + if ((signal.isOnError() || signal.isOnNext()) + && (StringUtils.hasText(connectCluster.getSpec().getAes256Key()) + ^ StringUtils.hasText(connectCluster.getSpec().getAes256Salt()))) { + errors.add(String.format( + "The Connect cluster \"%s\" \"aes256Key\" and \"aes256Salt\" specs are " + + "required to activate the encryption.", + connectCluster.getMetadata().getName())); + + } + }) + .map(response -> errors) + .onErrorReturn(errors); } catch (MalformedURLException e) { - errors.add(String.format("The Kafka Connect \"%s\" has a malformed URL \"%s\".", connectCluster.getMetadata().getName(), connectCluster.getSpec().getUrl())); + errors.add(String.format("The Kafka Connect \"%s\" has a malformed URL \"%s\".", + connectCluster.getMetadata().getName(), connectCluster.getSpec().getUrl())); return Mono.just(errors); } } /** - * Validate the given connect worker has configuration for vaults + * Validate the given connect worker has configuration for vaults. 
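+ * <p>Illustrative check before vaulting a secret (the cluster name is hypothetical):
+ * <pre>{@code
+ * List<String> errors = connectClusterService.validateConnectClusterVault(namespace, "my-connect");
+ * if (!errors.isEmpty()) {
+ *     // reject the vault request: no allowed cluster with valid aes256 key/salt matches
+ * }
+ * }</pre>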
* * @param connectCluster The Kafka connect worker to validate * @return A list of validation errors @@ -274,31 +296,34 @@ public Mono> validateConnectClusterCreation(ConnectCluster connectC public List validateConnectClusterVault(final Namespace namespace, final String connectCluster) { final var errors = new ArrayList(); - final List kafkaConnects = findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.OWNER, AccessControlEntry.Permission.WRITE)); + final List kafkaConnects = findAllByNamespace(namespace, + List.of(AccessControlEntry.Permission.OWNER, AccessControlEntry.Permission.WRITE)); if (kafkaConnects.isEmpty()) { errors.add("No Connect Cluster available."); return errors; } - if (kafkaConnects.stream().noneMatch(cc -> StringUtils.hasText(cc.getSpec().getAes256Key()) && - StringUtils.hasText(cc.getSpec().getAes256Salt()))) { + if (kafkaConnects.stream().noneMatch(cc -> StringUtils.hasText(cc.getSpec().getAes256Key()) + && StringUtils.hasText(cc.getSpec().getAes256Salt()))) { errors.add("No Connect cluster available with valid aes256 specs configuration."); return errors; } final Optional kafkaConnect = kafkaConnects.stream() - .filter(cc -> cc.getMetadata().getName().equals(connectCluster) && - StringUtils.hasText(cc.getSpec().getAes256Key()) && - StringUtils.hasText(cc.getSpec().getAes256Salt())) - .findFirst(); + .filter(cc -> cc.getMetadata().getName().equals(connectCluster) + && StringUtils.hasText(cc.getSpec().getAes256Key()) + && StringUtils.hasText(cc.getSpec().getAes256Salt())) + .findFirst(); if (kafkaConnect.isEmpty()) { final String allowedConnectClusters = kafkaConnects.stream() - .filter(cc -> StringUtils.hasText(cc.getSpec().getAes256Key()) && StringUtils.hasText(cc.getSpec().getAes256Salt())) - .map(cc -> cc.getMetadata().getName()) - .collect(Collectors.joining(", ")); - errors.add("Invalid value \"" + connectCluster + "\" for Connect Cluster: Value must be one of [" + allowedConnectClusters + "]."); + .filter(cc -> StringUtils.hasText(cc.getSpec().getAes256Key()) + && StringUtils.hasText(cc.getSpec().getAes256Salt())) + .map(cc -> cc.getMetadata().getName()) + .collect(Collectors.joining(", ")); + errors.add("Invalid value \"" + connectCluster + "\" for Connect Cluster: Value must be one of [" + + allowedConnectClusters + "]."); return errors; } @@ -306,7 +331,7 @@ public List validateConnectClusterVault(final Namespace namespace, final } /** - * Delete a given Connect cluster + * Delete a given Connect cluster. * * @param connectCluster The Connect cluster */ @@ -315,7 +340,7 @@ public void delete(ConnectCluster connectCluster) { } /** - * Is given namespace owner of the given connect worker + * Is given namespace owner of the given connect worker. * * @param namespace The namespace * @param connectCluster The Kafka connect cluster @@ -323,11 +348,11 @@ public void delete(ConnectCluster connectCluster) { */ public boolean isNamespaceOwnerOfConnectCluster(Namespace namespace, String connectCluster) { return accessControlEntryService.isNamespaceOwnerOfResource(namespace.getMetadata().getName(), - AccessControlEntry.ResourceType.CONNECT_CLUSTER, connectCluster); + AccessControlEntry.ResourceType.CONNECT_CLUSTER, connectCluster); } /** - * Is given namespace allowed (Owner or Writer) for the given connect worker + * Is given namespace allowed (Owner or Writer) for the given connect worker. 
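+ * <p>A short sketch (the cluster name is hypothetical):
+ * <pre>{@code
+ * boolean allowed = connectClusterService.isNamespaceAllowedForConnectCluster(namespace, "shared-connect");
+ * }</pre>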
* * @param namespace The namespace * @param connectCluster The Kafka connect cluster @@ -335,8 +360,8 @@ public boolean isNamespaceOwnerOfConnectCluster(Namespace namespace, String conn */ public boolean isNamespaceAllowedForConnectCluster(Namespace namespace, String connectCluster) { return findAllByNamespaceWrite(namespace) - .stream() - .anyMatch(kafkaConnect -> kafkaConnect.getMetadata().getName().equals(connectCluster)); + .stream() + .anyMatch(kafkaConnect -> kafkaConnect.getMetadata().getName().equals(connectCluster)); } /** @@ -347,39 +372,44 @@ public boolean isNamespaceAllowedForConnectCluster(Namespace namespace, String c * @param passwords The passwords list to encrypt. * @return The encrypted password. */ - public List vaultPassword(final Namespace namespace, final String connectCluster, final List passwords) { - final Optional kafkaConnect = findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.OWNER, AccessControlEntry.Permission.WRITE)) - .stream() - .filter(cc -> - cc.getMetadata().getName().equals(connectCluster) && - StringUtils.hasText(cc.getSpec().getAes256Key()) && - StringUtils.hasText(cc.getSpec().getAes256Salt()) - ) - .findFirst(); + public List vaultPassword(final Namespace namespace, final String connectCluster, + final List passwords) { + final Optional kafkaConnect = findAllByNamespace(namespace, + List.of(AccessControlEntry.Permission.OWNER, AccessControlEntry.Permission.WRITE)) + .stream() + .filter(cc -> + cc.getMetadata().getName().equals(connectCluster) + && StringUtils.hasText(cc.getSpec().getAes256Key()) + && StringUtils.hasText(cc.getSpec().getAes256Salt()) + ) + .findFirst(); if (kafkaConnect.isEmpty()) { return passwords.stream() - .map(password -> VaultResponse.builder() - .spec(VaultResponse.VaultResponseSpec.builder() - .clearText(password) - .encrypted(password) - .build()) - .build()) - .toList(); + .map(password -> VaultResponse.builder() + .spec(VaultResponse.VaultResponseSpec.builder() + .clearText(password) + .encrypted(password) + .build()) + .build()) + .toList(); } - final String aes256Key = EncryptionUtils.decryptAES256GCM(kafkaConnect.get().getSpec().getAes256Key(), securityConfig.getAes256EncryptionKey()); - final String aes256Salt = EncryptionUtils.decryptAES256GCM(kafkaConnect.get().getSpec().getAes256Salt(), securityConfig.getAes256EncryptionKey()); - final String aes256Format = StringUtils.hasText(kafkaConnect.get().getSpec().getAes256Format()) ? - kafkaConnect.get().getSpec().getAes256Format() : DEFAULT_FORMAT; + final String aes256Key = EncryptionUtils.decryptAes256Gcm(kafkaConnect.get().getSpec().getAes256Key(), + securityProperties.getAes256EncryptionKey()); + final String aes256Salt = EncryptionUtils.decryptAes256Gcm(kafkaConnect.get().getSpec().getAes256Salt(), + securityProperties.getAes256EncryptionKey()); + final String aes256Format = StringUtils.hasText(kafkaConnect.get().getSpec().getAes256Format()) + ? 
kafkaConnect.get().getSpec().getAes256Format() : DEFAULT_FORMAT; return passwords.stream() - .map(password -> VaultResponse.builder() - .spec(VaultResponse.VaultResponseSpec.builder() - .clearText(password) - .encrypted(String.format(aes256Format, EncryptionUtils.encryptAESWithPrefix(password, aes256Key, aes256Salt))) - .build()) - .build()) - .toList(); + .map(password -> VaultResponse.builder() + .spec(VaultResponse.VaultResponseSpec.builder() + .clearText(password) + .encrypted(String.format(aes256Format, + EncryptionUtils.encryptAesWithPrefix(password, aes256Key, aes256Salt))) + .build()) + .build()) + .toList(); } diff --git a/src/main/java/com/michelin/ns4kafka/services/ConnectorService.java b/src/main/java/com/michelin/ns4kafka/services/ConnectorService.java index 15daab19..1be6cf98 100644 --- a/src/main/java/com/michelin/ns4kafka/services/ConnectorService.java +++ b/src/main/java/com/michelin/ns4kafka/services/ConnectorService.java @@ -1,5 +1,7 @@ package com.michelin.ns4kafka.services; +import static com.michelin.ns4kafka.utils.config.ConnectorConfig.CONNECTOR_CLASS; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.connector.Connector; @@ -13,19 +15,19 @@ import io.micronaut.inject.qualifiers.Qualifiers; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.Stream; +import lombok.extern.slf4j.Slf4j; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; -import static com.michelin.ns4kafka.utils.config.ConnectorConfig.CONNECTOR_CLASS; - +/** + * Service to manage connectors. + */ @Slf4j @Singleton public class ConnectorService { @@ -45,59 +47,63 @@ public class ConnectorService { ConnectClusterService connectClusterService; /** - * Find all connectors by given namespace + * Find all connectors by given namespace. 
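+ * <p>Illustrative usage (assumes an injected {@code connectorService}):
+ * <pre>{@code
+ * List<Connector> owned = connectorService.findAllForNamespace(namespace);
+ * List<String> names = owned.stream().map(c -> c.getMetadata().getName()).toList();
+ * }</pre>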
+ * * @param namespace The namespace * @return A list of connectors */ public List findAllForNamespace(Namespace namespace) { List acls = accessControlEntryService.findAllGrantedToNamespace(namespace); return connectorRepository.findAllForCluster(namespace.getMetadata().getCluster()) - .stream() - .filter(connector -> acls.stream().anyMatch(accessControlEntry -> { - if (accessControlEntry.getSpec().getPermission() != AccessControlEntry.Permission.OWNER) { - return false; - } - if (accessControlEntry.getSpec().getResourceType() == AccessControlEntry.ResourceType.CONNECT) { - switch (accessControlEntry.getSpec().getResourcePatternType()) { - case PREFIXED: - return connector.getMetadata().getName().startsWith(accessControlEntry.getSpec().getResource()); - case LITERAL: - return connector.getMetadata().getName().equals(accessControlEntry.getSpec().getResource()); - } - } + .stream() + .filter(connector -> acls.stream().anyMatch(accessControlEntry -> { + if (accessControlEntry.getSpec().getPermission() != AccessControlEntry.Permission.OWNER) { return false; - })) - .toList(); + } + if (accessControlEntry.getSpec().getResourceType() == AccessControlEntry.ResourceType.CONNECT) { + return switch (accessControlEntry.getSpec().getResourcePatternType()) { + case PREFIXED -> connector.getMetadata().getName() + .startsWith(accessControlEntry.getSpec().getResource()); + case LITERAL -> + connector.getMetadata().getName().equals(accessControlEntry.getSpec().getResource()); + }; + } + return false; + })) + .toList(); } /** - * Find all connectors by given namespace and Connect cluster - * @param namespace The namespace + * Find all connectors by given namespace and Connect cluster. + * + * @param namespace The namespace * @param connectCluster The Connect cluster * @return A list of connectors */ public List findAllByConnectCluster(Namespace namespace, String connectCluster) { return connectorRepository.findAllForCluster(namespace.getMetadata().getCluster()) - .stream() - .filter(connector -> connector.getSpec().getConnectCluster().equals(connectCluster)) - .toList(); + .stream() + .filter(connector -> connector.getSpec().getConnectCluster().equals(connectCluster)) + .toList(); } /** - * Find a connector by namespace and name + * Find a connector by namespace and name. + * * @param namespace The namespace * @param connector The connector name * @return An optional connector */ public Optional findByName(Namespace namespace, String connector) { return findAllForNamespace(namespace) - .stream() - .filter(connect -> connect.getMetadata().getName().equals(connector)) - .findFirst(); + .stream() + .filter(connect -> connect.getMetadata().getName().equals(connector)) + .findFirst(); } /** - * Validate configurations of a given connector against the namespace rules + * Validate configurations of a given connector against the namespace rules. 
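+ * <p>Illustrative reactive usage (the {@code connector} instance is assumed):
+ * <pre>{@code
+ * connectorService.validateLocally(namespace, connector)
+ *     .subscribe(errors -> {
+ *         if (!errors.isEmpty()) {
+ *             // surface the validation errors instead of deploying the connector
+ *         }
+ *     });
+ * }</pre>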
+ * * @param namespace The namespace * @param connector The connector to validate * @return A list of errors @@ -105,14 +111,17 @@ public Optional findByName(Namespace namespace, String connector) { public Mono> validateLocally(Namespace namespace, Connector connector) { // Check whether target Connect Cluster is allowed for this namespace List selfDeployedConnectClusters = connectClusterService.findAllByNamespaceWrite(namespace) - .stream() - .map(connectCluster -> connectCluster.getMetadata().getName()).toList(); - - if (!namespace.getSpec().getConnectClusters().contains(connector.getSpec().getConnectCluster()) && - !selfDeployedConnectClusters.contains(connector.getSpec().getConnectCluster())) { - String allowedConnectClusters = Stream.concat(namespace.getSpec().getConnectClusters().stream(), selfDeployedConnectClusters.stream()).collect(Collectors.joining(", ")); + .stream() + .map(connectCluster -> connectCluster.getMetadata().getName()).toList(); + + if (!namespace.getSpec().getConnectClusters().contains(connector.getSpec().getConnectCluster()) + && !selfDeployedConnectClusters.contains(connector.getSpec().getConnectCluster())) { + String allowedConnectClusters = + Stream.concat(namespace.getSpec().getConnectClusters().stream(), selfDeployedConnectClusters.stream()) + .collect(Collectors.joining(", ")); return Mono.just( - List.of("Invalid value " + connector.getSpec().getConnectCluster() + " for spec.connectCluster: Value must be one of [" + allowedConnectClusters + "]")); + List.of("Invalid value " + connector.getSpec().getConnectCluster() + + " for spec.connectCluster: Value must be one of [" + allowedConnectClusters + "]")); } // If class doesn't exist, no need to go further @@ -121,53 +130,60 @@ public Mono> validateLocally(Namespace namespace, Connector connect } // Connector type exists on this target connect cluster ? - return kafkaConnectClient.connectPlugins(namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster()) - .map(connectorPluginInfos -> { - Optional connectorType = connectorPluginInfos - .stream() - .filter(connectPluginItem -> connectPluginItem.className().equals(connector.getSpec().getConfig().get(CONNECTOR_CLASS))) - .map(connectorPluginInfo -> connectorPluginInfo.type().toString().toLowerCase(Locale.ROOT)) - .findFirst(); - - if (connectorType.isEmpty()) { - return List.of("Failed to find any class that implements Connector and which name matches " + - connector.getSpec().getConfig().get(CONNECTOR_CLASS)); - } - - return namespace.getSpec().getConnectValidator() != null ? namespace.getSpec().getConnectValidator().validate(connector, connectorType.get()) - : Collections.emptyList(); - }); + return kafkaConnectClient.connectPlugins(namespace.getMetadata().getCluster(), + connector.getSpec().getConnectCluster()) + .map(connectorPluginInfos -> { + Optional connectorType = connectorPluginInfos + .stream() + .filter(connectPluginItem -> connectPluginItem.className() + .equals(connector.getSpec().getConfig().get(CONNECTOR_CLASS))) + .map(connectorPluginInfo -> connectorPluginInfo.type().toString().toLowerCase(Locale.ROOT)) + .findFirst(); + + if (connectorType.isEmpty()) { + return List.of("Failed to find any class that implements Connector and which name matches " + + connector.getSpec().getConfig().get(CONNECTOR_CLASS)); + } + + return namespace.getSpec().getConnectValidator() != null + ? 
namespace.getSpec().getConnectValidator().validate(connector, connectorType.get()) + : Collections.emptyList(); + }); } /** - * Is given namespace owner of the given connector + * Is given namespace owner of the given connector. + * * @param namespace The namespace - * @param connect The connector + * @param connect The connector * @return true if it is, false otherwise */ public boolean isNamespaceOwnerOfConnect(Namespace namespace, String connect) { - return accessControlEntryService.isNamespaceOwnerOfResource(namespace.getMetadata().getName(), AccessControlEntry.ResourceType.CONNECT, connect); + return accessControlEntryService.isNamespaceOwnerOfResource(namespace.getMetadata().getName(), + AccessControlEntry.ResourceType.CONNECT, connect); } /** - * Validate configurations of a given connector against the cluster + * Validate configurations of a given connector against the cluster. + * * @param namespace The namespace * @param connector The connector * @return A list of errors */ public Mono> validateRemotely(Namespace namespace, Connector connector) { return kafkaConnectClient.validate(namespace.getMetadata().getCluster(), - connector.getSpec().getConnectCluster(), connector.getSpec().getConfig().get(CONNECTOR_CLASS), - ConnectorSpecs.builder().config(connector.getSpec().getConfig()).build()) - .map(configInfos -> configInfos.configs() - .stream() - .filter(configInfo -> !configInfo.configValue().errors().isEmpty()) - .flatMap(configInfo -> configInfo.configValue().errors().stream()) - .toList()); + connector.getSpec().getConnectCluster(), connector.getSpec().getConfig().get(CONNECTOR_CLASS), + ConnectorSpecs.builder().config(connector.getSpec().getConfig()).build()) + .map(configInfos -> configInfos.configs() + .stream() + .filter(configInfo -> !configInfo.configValue().errors().isEmpty()) + .flatMap(configInfo -> configInfo.configValue().errors().stream()) + .toList()); } /** - * Create a given connector + * Create a given connector. + * * @param connector The connector to create * @return The created connector */ @@ -176,73 +192,79 @@ public Connector createOrUpdate(Connector connector) { } /** - * Delete a given connector + * Delete a given connector. + * * @param namespace The namespace * @param connector The connector */ public Mono> delete(Namespace namespace, Connector connector) { - return kafkaConnectClient.delete(namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName()) - .defaultIfEmpty(HttpResponse.noContent()) - .map(httpResponse -> { - connectorRepository.delete(connector); - - if (log.isInfoEnabled()) { - log.info("Success removing Connector [" + connector.getMetadata().getName() + - "] on Kafka [" + namespace.getMetadata().getName() + - "] Connect [" + connector.getSpec().getConnectCluster() + "]"); - } - - return httpResponse; - }); + return kafkaConnectClient.delete(namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), + connector.getMetadata().getName()) + .defaultIfEmpty(HttpResponse.noContent()) + .map(httpResponse -> { + connectorRepository.delete(connector); + + if (log.isInfoEnabled()) { + log.info("Success removing Connector [" + connector.getMetadata().getName() + + "] on Kafka [" + namespace.getMetadata().getName() + + "] Connect [" + connector.getSpec().getConnectCluster() + "]"); + } + + return httpResponse; + }); } /** - * List the connectors that are not synchronized to ns4kafka by namespace + * List the connectors that are not synchronized to Ns4Kafka by namespace. 
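+ * <p>Illustrative usage, collecting the names of connectors deployed on the Connect clusters but
+ * unknown to Ns4Kafka:
+ * <pre>{@code
+ * connectorService.listUnsynchronizedConnectors(namespace)
+ *     .map(connector -> connector.getMetadata().getName())
+ *     .collectList()
+ *     .subscribe(names -> { /* e.g. import them into Ns4Kafka */ });
+ * }</pre>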
+ * * @param namespace The namespace * @return The list of connectors */ public Flux listUnsynchronizedConnectors(Namespace namespace) { ConnectorAsyncExecutor connectorAsyncExecutor = applicationContext.getBean(ConnectorAsyncExecutor.class, - Qualifiers.byName(namespace.getMetadata().getCluster())); + Qualifiers.byName(namespace.getMetadata().getCluster())); // Get all connectors from all connect clusters Stream connectClusters = Stream.concat(namespace.getSpec().getConnectClusters().stream(), - connectClusterService.findAllByNamespaceWrite(namespace) - .stream() - .map(connectCluster -> connectCluster.getMetadata().getName())); + connectClusterService.findAllByNamespaceWrite(namespace) + .stream() + .map(connectCluster -> connectCluster.getMetadata().getName())); return Flux.fromStream(connectClusters) - .flatMap(connectClusterName -> connectorAsyncExecutor.collectBrokerConnectors(connectClusterName) - .filter(connector -> isNamespaceOwnerOfConnect(namespace, connector.getMetadata().getName())) - .filter(connector -> findByName(namespace, connector.getMetadata().getName()).isEmpty())); + .flatMap(connectClusterName -> connectorAsyncExecutor.collectBrokerConnectors(connectClusterName) + .filter(connector -> isNamespaceOwnerOfConnect(namespace, connector.getMetadata().getName())) + .filter(connector -> findByName(namespace, connector.getMetadata().getName()).isEmpty())); } /** - * Restart a given connector + * Restart a given connector. + * * @param namespace The namespace * @param connector The connector * @return An HTTP response */ public Mono> restart(Namespace namespace, Connector connector) { - return kafkaConnectClient.status(namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName()) - .flatMap(status -> { - Flux> responses = Flux.fromIterable(status.tasks()) - .flatMap(task -> kafkaConnectClient.restart(namespace.getMetadata().getCluster(), - connector.getSpec().getConnectCluster(), connector.getMetadata().getName(), task.getId())) - .map(response -> { - log.info("Success restarting connector [{}] on namespace [{}] connect [{}]", - connector.getMetadata().getName(), - namespace.getMetadata().getName(), - connector.getSpec().getConnectCluster()); - return HttpResponse.ok(); - }); - - return Mono.from(responses); - }); + return kafkaConnectClient.status(namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), + connector.getMetadata().getName()) + .flatMap(status -> { + Flux> responses = Flux.fromIterable(status.tasks()) + .flatMap(task -> kafkaConnectClient.restart(namespace.getMetadata().getCluster(), + connector.getSpec().getConnectCluster(), connector.getMetadata().getName(), task.getId())) + .map(response -> { + log.info("Success restarting connector [{}] on namespace [{}] connect [{}]", + connector.getMetadata().getName(), + namespace.getMetadata().getName(), + connector.getSpec().getConnectCluster()); + return HttpResponse.ok(); + }); + + return Mono.from(responses); + }); } /** - * Pause a given connector + * Pause a given connector. 
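+ * <p>Illustrative reactive usage:
+ * <pre>{@code
+ * connectorService.pause(namespace, connector)
+ *     .subscribe(response -> { /* 202 Accepted */ });
+ * }</pre>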
+ * * @param namespace The namespace * @param connector The connector * @return An HTTP response @@ -250,18 +272,19 @@ public Mono> restart(Namespace namespace, Connector connector public Mono> pause(Namespace namespace, Connector connector) { return kafkaConnectClient.pause(namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName()) - .map(pause -> { - log.info("Success pausing Connector [{}] on Namespace [{}] Connect [{}]", - connector.getMetadata().getName(), - namespace.getMetadata().getName(), - connector.getSpec().getConnectCluster()); - - return HttpResponse.accepted(); - }); + .map(pause -> { + log.info("Success pausing Connector [{}] on Namespace [{}] Connect [{}]", + connector.getMetadata().getName(), + namespace.getMetadata().getName(), + connector.getSpec().getConnectCluster()); + + return HttpResponse.accepted(); + }); } /** - * Resume a given connector + * Resume a given connector. + * * @param namespace The namespace * @param connector The connector * @return An HTTP response @@ -269,13 +292,13 @@ public Mono> pause(Namespace namespace, Connector connector) public Mono> resume(Namespace namespace, Connector connector) { return kafkaConnectClient.resume(namespace.getMetadata().getCluster(), connector.getSpec().getConnectCluster(), connector.getMetadata().getName()) - .map(resume -> { - log.info("Success resuming Connector [{}] on Namespace [{}] Connect [{}]", - connector.getMetadata().getName(), - namespace.getMetadata().getName(), - connector.getSpec().getConnectCluster()); - - return HttpResponse.accepted(); - }); + .map(resume -> { + log.info("Success resuming Connector [{}] on Namespace [{}] Connect [{}]", + connector.getMetadata().getName(), + namespace.getMetadata().getName(), + connector.getSpec().getConnectCluster()); + + return HttpResponse.accepted(); + }); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/ConsumerGroupService.java b/src/main/java/com/michelin/ns4kafka/services/ConsumerGroupService.java index 3431df9f..04bc5a34 100644 --- a/src/main/java/com/michelin/ns4kafka/services/ConsumerGroupService.java +++ b/src/main/java/com/michelin/ns4kafka/services/ConsumerGroupService.java @@ -9,8 +9,6 @@ import io.micronaut.inject.qualifiers.Qualifiers; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import org.apache.kafka.common.TopicPartition; - import java.time.Duration; import java.time.Instant; import java.time.OffsetDateTime; @@ -21,7 +19,11 @@ import java.util.concurrent.ExecutionException; import java.util.regex.Pattern; import java.util.stream.Collectors; +import org.apache.kafka.common.TopicPartition; +/** + * Service to manage the consumer groups. + */ @Singleton public class ConsumerGroupService { @Inject @@ -31,17 +33,20 @@ public class ConsumerGroupService { AccessControlEntryService accessControlEntryService; /** - * Check if a given namespace is owner of a given group + * Check if a given namespace is owner of a given group. 
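+ * <p>A short sketch (the namespace and group names are hypothetical):
+ * <pre>{@code
+ * boolean owner = consumerGroupService.isNamespaceOwnerOfConsumerGroup("my-namespace", "my-group");
+ * }</pre>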
+ * * @param namespace The namespace - * @param groupId The group + * @param groupId The group * @return true if it is, false otherwise */ public boolean isNamespaceOwnerOfConsumerGroup(String namespace, String groupId) { - return accessControlEntryService.isNamespaceOwnerOfResource(namespace, AccessControlEntry.ResourceType.GROUP, groupId); + return accessControlEntryService.isNamespaceOwnerOfResource(namespace, AccessControlEntry.ResourceType.GROUP, + groupId); } /** - * Validate the given reset offsets options + * Validate the given reset offsets options. + * * @param consumerGroupResetOffsets The reset offsets options * @return A list of validation errors */ @@ -51,35 +56,38 @@ public List validateResetOffsets(ConsumerGroupResetOffsets consumerGroup // allowed : *, , Pattern validTopicValue = Pattern.compile("^(\\*|[a-zA-Z0-9-_.]+(:[0-9]+)?)$"); if (!validTopicValue.matcher(consumerGroupResetOffsets.getSpec().getTopic()).matches()) { - validationErrors.add("Invalid topic name \"" + consumerGroupResetOffsets.getSpec().getTopic() + "\". Value must match [*, , ]."); + validationErrors.add("Invalid topic name \"" + consumerGroupResetOffsets.getSpec().getTopic() + + "\". Value must match [*, , ]."); } String options = consumerGroupResetOffsets.getSpec().getOptions(); switch (consumerGroupResetOffsets.getSpec().getMethod()) { - case SHIFT_BY: + case SHIFT_BY -> { try { Integer.parseInt(options); } catch (NumberFormatException e) { validationErrors.add("Invalid options \"" + options + "\". Value must be an integer."); } - break; - case BY_DURATION: + } + case BY_DURATION -> { try { Duration.parse(options); } catch (NullPointerException | DateTimeParseException e) { - validationErrors.add("Invalid options \"" + options + "\". Value must be an ISO 8601 Duration [ PnDTnHnMnS ]."); + validationErrors.add( + "Invalid options \"" + options + "\". Value must be an ISO 8601 Duration [ PnDTnHnMnS ]."); } - break; - case TO_DATETIME: + } + case TO_DATETIME -> { // OffsetDateTime is of format iso6801 with time zone try { OffsetDateTime.parse(options); } catch (Exception e) { - validationErrors.add("Invalid options \"" + options + "\". Value must be an ISO 8601 DateTime with Time zone [ yyyy-MM-dd'T'HH:mm:ss.SSSXXX ]."); + validationErrors.add("Invalid options \"" + options + + "\". Value must be an ISO 8601 DateTime with Time zone [ yyyy-MM-dd'T'HH:mm:ss.SSSXXX ]."); } - break; - case TO_OFFSET: + } + case TO_OFFSET -> { try { int offset = Integer.parseInt(options); if (offset < 0) { @@ -88,41 +96,45 @@ public List validateResetOffsets(ConsumerGroupResetOffsets consumerGroup } catch (NumberFormatException e) { validationErrors.add("Invalid options \"" + options + "\". Value must be an integer."); } - break; - case TO_LATEST: - case TO_EARLIEST: - default: + } + default -> { // Nothing to do - break; + } } return validationErrors; } /** - * Get the status of a given consumer group + * Get the status of a given consumer group. 
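+ * <p>Illustrative usage (the call blocks on the admin client and may throw the declared
+ * checked exceptions; the group name is hypothetical):
+ * <pre>{@code
+ * String state = consumerGroupService.getConsumerGroupStatus(namespace, "my-group"); // e.g. "Stable", "Empty"
+ * }</pre>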
+ * * @param namespace The namespace - * @param groupId The consumer group + * @param groupId The consumer group * @return The consumer group status - * @throws ExecutionException Any execution exception during consumer groups description + * @throws ExecutionException Any execution exception during consumer groups description * @throws InterruptedException Any interrupted exception during consumer groups description */ - public String getConsumerGroupStatus(Namespace namespace, String groupId) throws ExecutionException, InterruptedException { - ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = applicationContext.getBean(ConsumerGroupAsyncExecutor.class, + public String getConsumerGroupStatus(Namespace namespace, String groupId) + throws ExecutionException, InterruptedException { + ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = + applicationContext.getBean(ConsumerGroupAsyncExecutor.class, Qualifiers.byName(namespace.getMetadata().getCluster())); return consumerGroupAsyncExecutor.describeConsumerGroups(List.of(groupId)).get(groupId).state().toString(); } /** - * Get the partitions of a topic to reset + * Get the partitions of a topic to reset. + * * @param namespace The namespace - * @param groupId The consumer group of the topic to reset - * @param topic the topic to reset + * @param groupId The consumer group of the topic to reset + * @param topic the topic to reset * @return A list of topic partitions - * @throws ExecutionException Any execution exception during consumer groups description + * @throws ExecutionException Any execution exception during consumer groups description * @throws InterruptedException Any interrupted exception during consumer groups description */ - public List getPartitionsToReset(Namespace namespace, String groupId, String topic) throws InterruptedException, ExecutionException { - ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = applicationContext.getBean(ConsumerGroupAsyncExecutor.class, + public List getPartitionsToReset(Namespace namespace, String groupId, String topic) + throws InterruptedException, ExecutionException { + ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = + applicationContext.getBean(ConsumerGroupAsyncExecutor.class, Qualifiers.byName(namespace.getMetadata().getCluster())); if (topic.equals("*")) { @@ -136,55 +148,82 @@ public List getPartitionsToReset(Namespace namespace, String gro } /** - * From given options, compute the new offsets for given topic-partitions - * @param namespace The namespace - * @param groupId The consumer group - * @param options Given additional options for offsets reset + * From given options, compute the new offsets for given topic-partitions. 
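+ * <p>Illustrative sketch shifting a group back two offsets on every partition of a topic
+ * (group and topic names are hypothetical; both calls may throw the declared checked exceptions):
+ * <pre>{@code
+ * List<TopicPartition> partitions = consumerGroupService.getPartitionsToReset(namespace, "my-group", "my-topic");
+ * Map<TopicPartition, Long> newOffsets = consumerGroupService.prepareOffsetsToReset(
+ *     namespace, "my-group", "-2", partitions, ResetOffsetsMethod.SHIFT_BY);
+ * }</pre>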
+ * + * @param namespace The namespace + * @param groupId The consumer group + * @param options Given additional options for offsets reset * @param partitionsToReset The list of partitions to reset - * @param method The method of offsets reset + * @param method The method of offsets reset * @return A map with new offsets for topic-partitions - * @throws ExecutionException Any execution exception during consumer groups description + * @throws ExecutionException Any execution exception during consumer groups description * @throws InterruptedException Any interrupted exception during consumer groups description */ - public Map prepareOffsetsToReset(Namespace namespace, String groupId, String options, List partitionsToReset, ResetOffsetsMethod method) throws InterruptedException, ExecutionException { - ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = applicationContext.getBean(ConsumerGroupAsyncExecutor.class, + public Map prepareOffsetsToReset(Namespace namespace, String groupId, String options, + List partitionsToReset, + ResetOffsetsMethod method) + throws InterruptedException, ExecutionException { + ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = + applicationContext.getBean(ConsumerGroupAsyncExecutor.class, Qualifiers.byName(namespace.getMetadata().getCluster())); switch (method) { - case SHIFT_BY: + case SHIFT_BY -> { int shiftBy = Integer.parseInt(options); - Map currentCommittedOffsets = consumerGroupAsyncExecutor.getCommittedOffsets(groupId); + Map currentCommittedOffsets = + consumerGroupAsyncExecutor.getCommittedOffsets(groupId); Map requestedOffsets = partitionsToReset.stream() - .map(topicPartition -> { - if (currentCommittedOffsets.containsKey(topicPartition)) { - return Map.entry(topicPartition, currentCommittedOffsets.get(topicPartition) + shiftBy); - } - throw new IllegalArgumentException("Cannot shift offset for partition " + topicPartition.toString() + " since there is no current committed offset"); - }) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .map(topicPartition -> { + if (currentCommittedOffsets.containsKey(topicPartition)) { + return Map.entry(topicPartition, currentCommittedOffsets.get(topicPartition) + shiftBy); + } + throw new IllegalArgumentException( + "Cannot shift offset for partition " + topicPartition.toString() + + " since there is no current committed offset"); + }) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); return consumerGroupAsyncExecutor.checkOffsetsRange(requestedOffsets); - case BY_DURATION: + } + case BY_DURATION -> { Duration duration = Duration.parse(options); - return consumerGroupAsyncExecutor.getLogTimestampOffsets(partitionsToReset, Instant.now().minus(duration).toEpochMilli()); - case TO_DATETIME: + return consumerGroupAsyncExecutor.getLogTimestampOffsets(partitionsToReset, + Instant.now().minus(duration).toEpochMilli()); + } + case TO_DATETIME -> { OffsetDateTime dateTime = OffsetDateTime.parse(options); - return consumerGroupAsyncExecutor.getLogTimestampOffsets(partitionsToReset, dateTime.toInstant().toEpochMilli()); - case TO_LATEST: + return consumerGroupAsyncExecutor.getLogTimestampOffsets(partitionsToReset, + dateTime.toInstant().toEpochMilli()); + } + case TO_LATEST -> { return consumerGroupAsyncExecutor.getLogEndOffsets(partitionsToReset); - case TO_EARLIEST: + } + case TO_EARLIEST -> { return consumerGroupAsyncExecutor.getLogStartOffsets(partitionsToReset); - case TO_OFFSET: + } + case TO_OFFSET -> { Map toRequestedOffset = partitionsToReset.stream() 
.map(topicPartition -> Map.entry(topicPartition, Long.parseLong(options))) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); return consumerGroupAsyncExecutor.checkOffsetsRange(toRequestedOffset); - default: - throw new IllegalArgumentException("Unreachable code"); + } + default -> throw new IllegalArgumentException("Unreachable code"); } } - public void alterConsumerGroupOffsets(Namespace namespace, String consumerGroupId, Map preparedOffsets) throws InterruptedException, ExecutionException { - ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = applicationContext.getBean(ConsumerGroupAsyncExecutor.class, + /** + * Alter the offsets of a given consumer group. + * + * @param namespace The namespace + * @param consumerGroupId The consumer group + * @param preparedOffsets The new offsets + * @throws InterruptedException Any interrupted exception during offsets alteration + * @throws ExecutionException Any execution exception during offsets alteration + */ + public void alterConsumerGroupOffsets(Namespace namespace, String consumerGroupId, + Map preparedOffsets) + throws InterruptedException, ExecutionException { + ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = + applicationContext.getBean(ConsumerGroupAsyncExecutor.class, Qualifiers.byName(namespace.getMetadata().getCluster())); consumerGroupAsyncExecutor.alterConsumerGroupOffsets(consumerGroupId, preparedOffsets); } diff --git a/src/main/java/com/michelin/ns4kafka/services/NamespaceService.java b/src/main/java/com/michelin/ns4kafka/services/NamespaceService.java index a9ccc385..a376eb59 100644 --- a/src/main/java/com/michelin/ns4kafka/services/NamespaceService.java +++ b/src/main/java/com/michelin/ns4kafka/services/NamespaceService.java @@ -1,24 +1,25 @@ package com.michelin.ns4kafka.services; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.models.Namespace; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.repositories.NamespaceRepository; import jakarta.inject.Inject; import jakarta.inject.Singleton; - import java.util.ArrayList; import java.util.List; import java.util.Optional; -import java.util.stream.Collectors; import java.util.stream.Stream; +/** + * Service to manage the namespaces. + */ @Singleton public class NamespaceService { @Inject NamespaceRepository namespaceRepository; @Inject - List kafkaAsyncExecutorConfigList; + List managedClusterPropertiesList; @Inject TopicService topicService; @@ -32,86 +33,127 @@ public class NamespaceService { @Inject ConnectorService connectorService; + @Inject + ConnectClusterService connectClusterService; + + @Inject + ResourceQuotaService resourceQuotaService; + /** - * Validate new namespace creation + * Validate new namespace creation. 
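+ * <p>Illustrative usage:
+ * <pre>{@code
+ * List<String> errors = namespaceService.validateCreation(namespace);
+ * if (errors.isEmpty()) {
+ *     namespaceService.createOrUpdate(namespace);
+ * }
+ * }</pre>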
+ * * @param namespace The namespace to create * @return A list of validation errors */ public List validateCreation(Namespace namespace) { List validationErrors = new ArrayList<>(); - if (kafkaAsyncExecutorConfigList.stream().noneMatch(config -> config.getName().equals(namespace.getMetadata().getCluster()))) { - validationErrors.add("Invalid value " + namespace.getMetadata().getCluster() + " for cluster: Cluster doesn't exist"); + if (managedClusterPropertiesList.stream() + .noneMatch(config -> config.getName().equals(namespace.getMetadata().getCluster()))) { + validationErrors.add( + "Invalid value " + namespace.getMetadata().getCluster() + " for cluster: Cluster doesn't exist"); } if (namespaceRepository.findAllForCluster(namespace.getMetadata().getCluster()).stream() - .anyMatch(namespace1 -> namespace1.getSpec().getKafkaUser().equals(namespace.getSpec().getKafkaUser()))) { - validationErrors.add("Invalid value " + namespace.getSpec().getKafkaUser() + " for user: KafkaUser already exists"); + .anyMatch(namespace1 -> namespace1.getSpec().getKafkaUser().equals(namespace.getSpec().getKafkaUser()))) { + validationErrors.add( + "Invalid value " + namespace.getSpec().getKafkaUser() + " for user: KafkaUser already exists"); } return validationErrors; } /** - * Validate the Connect clusters of the namespace + * Validate the Connect clusters of the namespace. + * * @param namespace The namespace * @return A list of validation errors */ public List validate(Namespace namespace) { return namespace.getSpec().getConnectClusters() - .stream() - .filter(connectCluster -> !connectClusterExists(namespace.getMetadata().getCluster(), connectCluster)) - .map(s -> "Invalid value " + s + " for Connect Cluster: Connect Cluster doesn't exist") - .toList(); + .stream() + .filter(connectCluster -> !connectClusterExists(namespace.getMetadata().getCluster(), connectCluster)) + .map(s -> "Invalid value " + s + " for Connect Cluster: Connect Cluster doesn't exist") + .toList(); } /** - * Check if a given Connect cluster exists on a given Kafka cluster - * @param kafkaCluster The Kafka cluster + * Check if a given Connect cluster exists on a given Kafka cluster. + * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Connect cluster * @return true it does, false otherwise */ private boolean connectClusterExists(String kafkaCluster, String connectCluster) { - return kafkaAsyncExecutorConfigList.stream() - .anyMatch(kafkaAsyncExecutorConfig -> kafkaAsyncExecutorConfig.getName().equals(kafkaCluster) && - kafkaAsyncExecutorConfig.getConnects().containsKey(connectCluster)); + return managedClusterPropertiesList.stream() + .anyMatch(kafkaAsyncExecutorConfig -> kafkaAsyncExecutorConfig.getName().equals(kafkaCluster) + && kafkaAsyncExecutorConfig.getConnects().containsKey(connectCluster)); } + /** + * Find a namespace by name. + * + * @param namespace The namespace + * @return An optional namespace + */ public Optional findByName(String namespace) { return namespaceRepository.findByName(namespace); } + /** + * Create or update a namespace. + * + * @param namespace The namespace to create or update + * @return The created or updated namespace + */ public Namespace createOrUpdate(Namespace namespace) { return namespaceRepository.createNamespace(namespace); } + /** + * Delete a namespace. + * + * @param namespace The namespace to delete + */ public void delete(Namespace namespace) { namespaceRepository.delete(namespace); } + /** + * List all namespaces. 
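+ * <p>Illustrative sketch combining this with {@code listAllNamespaceResources} (the resource
+ * names shown are hypothetical):
+ * <pre>{@code
+ * List<String> resources = namespaceService.listAll()
+ *     .stream()
+ *     .flatMap(ns -> namespaceService.listAllNamespaceResources(ns).stream())
+ *     .toList(); // e.g. "Topic/orders", "Connector/orders-sink"
+ * }</pre>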
+ * + * @return The list of namespaces + */ public List listAll() { - return kafkaAsyncExecutorConfigList.stream() - .map(KafkaAsyncExecutorConfig::getName) - .flatMap(s -> namespaceRepository.findAllForCluster(s).stream()) - .toList(); + return managedClusterPropertiesList.stream() + .map(ManagedClusterProperties::getName) + .flatMap(s -> namespaceRepository.findAllForCluster(s).stream()) + .toList(); } - public List listAllNamespaceResources(Namespace namespace){ - //TODO rework xxxService implements NamespacedResourceService - // Inject List allServices - // allServices.flatMap(x->x.findAllForNamespace(ns).stream())... + /** + * List all resources of a namespace. + * + * @param namespace The namespace + * @return The list of resources + */ + public List listAllNamespaceResources(Namespace namespace) { return Stream.of( topicService.findAllForNamespace(namespace).stream() - .map(topic -> topic.getKind()+"/"+topic.getMetadata().getName()), + .map(topic -> topic.getKind() + "/" + topic.getMetadata().getName()), connectorService.findAllForNamespace(namespace).stream() - .map(connector -> connector.getKind()+"/"+connector.getMetadata().getName()), + .map(connector -> connector.getKind() + "/" + connector.getMetadata().getName()), + connectClusterService.findAllByNamespaceOwner(namespace).stream() + .map(connectCluster -> connectCluster.getKind() + "/" + connectCluster.getMetadata().getName()), accessControlEntryService.findAllForNamespace(namespace).stream() - .map(ace -> ace.getKind()+"/"+ace.getMetadata().getName()), + .map(ace -> ace.getKind() + "/" + ace.getMetadata().getName()), + resourceQuotaService.findByNamespace(namespace.getMetadata().getName()).stream() + .map(resourceQuota -> resourceQuota.getKind() + "/" + resourceQuota.getMetadata().getName()), roleBindingService.list(namespace.getMetadata().getName()).stream() - .map(roleBinding -> roleBinding.getKind()+"/"+roleBinding.getMetadata().getName()) - ) - .reduce(Stream::concat) - .orElseGet(Stream::empty) - .toList(); + .map(roleBinding -> roleBinding.getKind() + "/" + roleBinding.getMetadata().getName()) + ) + .reduce(Stream::concat) + .orElseGet(Stream::empty) + .toList(); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/ResourceQuotaService.java b/src/main/java/com/michelin/ns4kafka/services/ResourceQuotaService.java index 5e7a8825..0a249902 100644 --- a/src/main/java/com/michelin/ns4kafka/services/ResourceQuotaService.java +++ b/src/main/java/com/michelin/ns4kafka/services/ResourceQuotaService.java @@ -1,5 +1,17 @@ package com.michelin.ns4kafka.services; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.COUNT_CONNECTORS; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.COUNT_PARTITIONS; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.COUNT_TOPICS; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.DISK_TOPICS; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.USER_CONSUMER_BYTE_RATE; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.USER_PRODUCER_BYTE_RATE; +import static com.michelin.ns4kafka.utils.BytesUtils.BYTE; +import static com.michelin.ns4kafka.utils.BytesUtils.GIBIBYTE; +import static com.michelin.ns4kafka.utils.BytesUtils.KIBIBYTE; +import static com.michelin.ns4kafka.utils.BytesUtils.MEBIBYTE; +import static org.apache.kafka.common.config.TopicConfig.RETENTION_BYTES_CONFIG; + import 
com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; @@ -11,16 +23,14 @@ import io.micronaut.core.util.StringUtils; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; - import java.util.ArrayList; import java.util.List; import java.util.Optional; +import lombok.extern.slf4j.Slf4j; -import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.*; -import static com.michelin.ns4kafka.utils.BytesUtils.*; -import static org.apache.kafka.common.config.TopicConfig.RETENTION_BYTES_CONFIG; - +/** + * Service to manage resource quotas. + */ @Slf4j @Singleton public class ResourceQuotaService { @@ -31,10 +41,7 @@ public class ResourceQuotaService { private static final String USER_QUOTA_RESPONSE_FORMAT = "%sB/s"; private static final String NO_QUOTA_RESPONSE_FORMAT = "%s"; - - @Inject - NamespaceService namespaceService; - + @Inject ResourceQuotaRepository resourceQuotaRepository; @@ -45,7 +52,8 @@ public class ResourceQuotaService { ConnectorService connectorService; /** - * Find a resource quota by namespace + * Find a resource quota by namespace. + * * @param namespace The namespace used to research * @return The researched resource quota */ @@ -54,27 +62,32 @@ public Optional findByNamespace(String namespace) { } /** - * Find a resource quota by namespace and name + * Find a resource quota by namespace and name. + * * @param namespace The namespace - * @param quota The quota name + * @param quota The quota name * @return The researched resource quota */ public Optional findByName(String namespace, String quota) { return findByNamespace(namespace) - .stream() - .filter(resourceQuota -> resourceQuota.getMetadata().getName().equals(quota)) - .findFirst(); + .stream() + .filter(resourceQuota -> resourceQuota.getMetadata().getName().equals(quota)) + .findFirst(); } /** - * Create a resource quota + * Create a resource quota. + * * @param resourceQuota The resource quota to create * @return The created resource quota */ - public ResourceQuota create(ResourceQuota resourceQuota) { return resourceQuotaRepository.create(resourceQuota); } + public ResourceQuota create(ResourceQuota resourceQuota) { + return resourceQuotaRepository.create(resourceQuota); + } /** - * Delete a resource quota + * Delete a resource quota. + * * @param resourceQuota The resource quota to delete */ public void delete(ResourceQuota resourceQuota) { @@ -82,8 +95,9 @@ public void delete(ResourceQuota resourceQuota) { } /** - * Validate a given new resource quota against the current resource used by the namespace - * @param namespace The namespace + * Validate a given new resource quota against the current resource used by the namespace. 
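+ * <p>Illustrative sketch (the builder wiring and quota values are assumptions for the example,
+ * using the statically imported spec keys):
+ * <pre>{@code
+ * ResourceQuota quota = ResourceQuota.builder()
+ *     .metadata(ObjectMeta.builder().name("my-quota").namespace("my-namespace").build())
+ *     .spec(Map.of(COUNT_TOPICS.getKey(), "10", DISK_TOPICS.getKey(), "5GiB"))
+ *     .build();
+ * List<String> errors = resourceQuotaService.validateNewResourceQuota(namespace, quota);
+ * }</pre>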
+ * + * @param namespace The namespace * @param resourceQuota The new resource quota * @return A list of validation errors */ @@ -108,15 +122,16 @@ public List validateNewResourceQuota(Namespace namespace, ResourceQuota if (StringUtils.hasText(resourceQuota.getSpec().get(DISK_TOPICS.getKey()))) { String limitAsString = resourceQuota.getSpec().get(DISK_TOPICS.getKey()); - if (!limitAsString.endsWith(BYTE) && !limitAsString.endsWith(KIBIBYTE) && !limitAsString.endsWith(MEBIBYTE) && !limitAsString.endsWith(GIBIBYTE)) { + if (!limitAsString.endsWith(BYTE) && !limitAsString.endsWith(KIBIBYTE) + && !limitAsString.endsWith(MEBIBYTE) && !limitAsString.endsWith(GIBIBYTE)) { errors.add(String.format("Invalid value for %s: value must end with either %s, %s, %s or %s", - DISK_TOPICS, BYTE, KIBIBYTE, MEBIBYTE, GIBIBYTE)); + DISK_TOPICS, BYTE, KIBIBYTE, MEBIBYTE, GIBIBYTE)); } else { long used = getCurrentDiskTopicsByNamespace(namespace); long limit = BytesUtils.humanReadableToBytes(limitAsString); if (used > limit) { errors.add(String.format(QUOTA_ALREADY_EXCEEDED_ERROR, DISK_TOPICS, - BytesUtils.bytesToHumanReadable(used), limitAsString)); + BytesUtils.bytesToHumanReadable(used), limitAsString)); } } } @@ -151,7 +166,8 @@ public List validateNewResourceQuota(Namespace namespace, ResourceQuota } /** - * Get currently used number of topics by namespace + * Get currently used number of topics by namespace. + * * @param namespace The namespace * @return The number of topics */ @@ -160,33 +176,36 @@ public long getCurrentCountTopicsByNamespace(Namespace namespace) { } /** - * Get currently used number of partitions by namespace + * Get currently used number of partitions by namespace. + * * @param namespace The namespace * @return The number of partitions */ public long getCurrentCountPartitionsByNamespace(Namespace namespace) { return topicService.findAllForNamespace(namespace) - .stream() - .map(topic -> topic.getSpec().getPartitions()) - .reduce(0, Integer::sum) - .longValue(); + .stream() + .map(topic -> topic.getSpec().getPartitions()) + .reduce(0, Integer::sum) + .longValue(); } /** - * Get currently used topic disk in bytes by namespace + * Get currently used topic disk in bytes by namespace. + * * @param namespace The namespace * @return The number of topic disk */ public long getCurrentDiskTopicsByNamespace(Namespace namespace) { return topicService.findAllForNamespace(namespace) - .stream() - .map(topic -> Long.parseLong(topic.getSpec().getConfigs().getOrDefault("retention.bytes", "0")) * - topic.getSpec().getPartitions()) - .reduce(0L, Long::sum); + .stream() + .map(topic -> Long.parseLong(topic.getSpec().getConfigs().getOrDefault("retention.bytes", "0")) + * topic.getSpec().getPartitions()) + .reduce(0L, Long::sum); } /** - * Get currently used number of connectors by namespace + * Get currently used number of connectors by namespace. + * * @param namespace The namespace * @return The number of connectors */ @@ -195,10 +214,11 @@ public long getCurrentCountConnectorsByNamespace(Namespace namespace) { } /** - * Validate the topic quota - * @param namespace The namespace + * Validate the topic quota. 
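+ * <p>Illustrative usage when creating a brand-new topic (no existing topic, hence
+ * {@code Optional.empty()}):
+ * <pre>{@code
+ * List<String> errors = resourceQuotaService.validateTopicQuota(namespace, Optional.empty(), newTopic);
+ * }</pre>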
+ * + * @param namespace The namespace * @param existingTopic The existing topic - * @param newTopic The new topic + * @param newTopic The new topic * @return A list of errors */ public List validateTopicQuota(Namespace namespace, Optional existingTopic, Topic newTopic) { @@ -216,7 +236,9 @@ public List validateTopicQuota(Namespace namespace, Optional exis long used = getCurrentCountTopicsByNamespace(namespace); long limit = Long.parseLong(resourceQuota.getSpec().get(COUNT_TOPICS.getKey())); if (used + 1 > limit) { - errors.add(String.format("Exceeding quota for %s: %s/%s (used/limit). Cannot add 1 topic.", COUNT_TOPICS, used, limit)); + errors.add( + String.format("Exceeding quota for %s: %s/%s (used/limit). Cannot add 1 topic.", COUNT_TOPICS, + used, limit)); } } @@ -224,35 +246,39 @@ public List validateTopicQuota(Namespace namespace, Optional exis long used = getCurrentCountPartitionsByNamespace(namespace); long limit = Long.parseLong(resourceQuota.getSpec().get(COUNT_PARTITIONS.getKey())); if (used + newTopic.getSpec().getPartitions() > limit) { - errors.add(String.format("Exceeding quota for %s: %s/%s (used/limit). Cannot add %s partition(s).", COUNT_PARTITIONS, used, limit, newTopic.getSpec().getPartitions())); + errors.add(String.format("Exceeding quota for %s: %s/%s (used/limit). Cannot add %s partition(s).", + COUNT_PARTITIONS, used, limit, newTopic.getSpec().getPartitions())); } } } - if (StringUtils.hasText(resourceQuota.getSpec().get(DISK_TOPICS.getKey())) && - StringUtils.hasText(newTopic.getSpec().getConfigs().get(RETENTION_BYTES_CONFIG))) { + if (StringUtils.hasText(resourceQuota.getSpec().get(DISK_TOPICS.getKey())) + && StringUtils.hasText(newTopic.getSpec().getConfigs().get(RETENTION_BYTES_CONFIG))) { long used = getCurrentDiskTopicsByNamespace(namespace); long limit = BytesUtils.humanReadableToBytes(resourceQuota.getSpec().get(DISK_TOPICS.getKey())); - long newTopicSize = Long.parseLong(newTopic.getSpec().getConfigs().get(RETENTION_BYTES_CONFIG)) * newTopic.getSpec().getPartitions(); + long newTopicSize = Long.parseLong(newTopic.getSpec().getConfigs().get(RETENTION_BYTES_CONFIG)) + * newTopic.getSpec().getPartitions(); long existingTopicSize = existingTopic - .map(value -> Long.parseLong(value.getSpec().getConfigs().getOrDefault(RETENTION_BYTES_CONFIG, "0")) - * value.getSpec().getPartitions()) - .orElse(0L); + .map(value -> Long.parseLong(value.getSpec().getConfigs().getOrDefault(RETENTION_BYTES_CONFIG, "0")) + * value.getSpec().getPartitions()) + .orElse(0L); long bytesToAdd = newTopicSize - existingTopicSize; if (bytesToAdd > 0 && used + bytesToAdd > limit) { - errors.add(String.format("Exceeding quota for %s: %s/%s (used/limit). Cannot add %s of data.", DISK_TOPICS, - BytesUtils.bytesToHumanReadable(used), BytesUtils.bytesToHumanReadable(limit), BytesUtils.bytesToHumanReadable(bytesToAdd))); + errors.add( + String.format("Exceeding quota for %s: %s/%s (used/limit). Cannot add %s of data.", DISK_TOPICS, + BytesUtils.bytesToHumanReadable(used), BytesUtils.bytesToHumanReadable(limit), + BytesUtils.bytesToHumanReadable(bytesToAdd))); } - } return errors; } /** - * Validate the connector quota + * Validate the connector quota. 
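// Editorial sketch of the "bytes to add" accounting in validateTopicQuota
// above: a topic's footprint is estimated as retention.bytes * partitions and
// only the delta against the existing topic counts toward the quota. The
// TopicSpec record below is illustrative, not the Ns4kafka model class.
import java.util.Optional;

class TopicDiskDeltaSketch {
    record TopicSpec(long retentionBytes, int partitions) {
        long estimatedSize() {
            return retentionBytes * partitions;
        }
    }

    static boolean exceedsQuota(long usedBytes, long limitBytes, TopicSpec newTopic, Optional<TopicSpec> existing) {
        long bytesToAdd = newTopic.estimatedSize() - existing.map(TopicSpec::estimatedSize).orElse(0L);
        // Shrinking a topic (bytesToAdd <= 0) can never breach the quota.
        return bytesToAdd > 0 && usedBytes + bytesToAdd > limitBytes;
    }

    public static void main(String[] args) {
        TopicSpec existing = new TopicSpec(1_000_000L, 3);
        TopicSpec bigger = new TopicSpec(2_000_000L, 3);
        // Adds 3 MB on top of 8 MB used against a 10 MB limit -> exceeded.
        System.out.println(exceedsQuota(8_000_000L, 10_000_000L, bigger, Optional.of(existing)));
    }
}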
+ * * @param namespace The namespace * @return A list of errors */ @@ -269,7 +295,8 @@ public List validateConnectorQuota(Namespace namespace) { long used = getCurrentCountConnectorsByNamespace(namespace); long limit = Long.parseLong(resourceQuota.getSpec().get(COUNT_CONNECTORS.getKey())); if (used + 1 > limit) { - errors.add(String.format("Exceeding quota for %s: %s/%s (used/limit). Cannot add 1 connector.", COUNT_CONNECTORS, used, limit)); + errors.add(String.format("Exceeding quota for %s: %s/%s (used/limit). Cannot add 1 connector.", + COUNT_CONNECTORS, used, limit)); } } @@ -277,81 +304,100 @@ public List validateConnectorQuota(Namespace namespace) { } /** - * Get the current consumed resources against the current quota of the given namespace to a response + * Get the current consumed resources against the current quota of the given namespace to a response. + * * @return A list of quotas as response format */ - public List getUsedResourcesByQuotaForAllNamespaces() { - return namespaceService.listAll() - .stream() - .map(namespace -> getUsedResourcesByQuotaByNamespace(namespace, findByNamespace(namespace.getMetadata().getName()))) - .toList(); + public List getUsedQuotaByNamespaces(List namespaces) { + return namespaces + .stream() + .map(namespace -> getUsedResourcesByQuotaByNamespace(namespace, + findByNamespace(namespace.getMetadata().getName()))) + .toList(); } /** - * Map current consumed resources and current quota of the given namespace to a response - * @param namespace The namespace + * Map current consumed resources and current quota of the given namespace to a response. + * + * @param namespace The namespace * @param resourceQuota The quota to map * @return A list of quotas as response format */ - public ResourceQuotaResponse getUsedResourcesByQuotaByNamespace(Namespace namespace, Optional resourceQuota) { + public ResourceQuotaResponse getUsedResourcesByQuotaByNamespace(Namespace namespace, + Optional resourceQuota) { long currentCountTopic = getCurrentCountTopicsByNamespace(namespace); long currentCountPartition = getCurrentCountPartitionsByNamespace(namespace); long currentDiskTopic = getCurrentDiskTopicsByNamespace(namespace); long currentCountConnector = getCurrentCountConnectorsByNamespace(namespace); return formatUsedResourceByQuotaResponse(namespace, currentCountTopic, currentCountPartition, currentDiskTopic, - currentCountConnector, resourceQuota); + currentCountConnector, resourceQuota); } /** - * Map given consumed resources and current quota to a response - * @param namespace The namespace - * @param currentCountTopic The current number of topics + * Map given consumed resources and current quota to a response. + * + * @param namespace The namespace + * @param currentCountTopic The current number of topics * @param currentCountPartition The current number of partitions - * @param currentDiskTopic The current number of disk space used by topics + * @param currentDiskTopic The current number of disk space used by topics * @param currentCountConnector The current number of connectors - * @param resourceQuota The quota to map + * @param resourceQuota The quota to map * @return A list of quotas as response format */ - public ResourceQuotaResponse formatUsedResourceByQuotaResponse(Namespace namespace, long currentCountTopic, long currentCountPartition, long currentDiskTopic, - long currentCountConnector, Optional resourceQuota) { - String countTopic = resourceQuota.isPresent() && StringUtils.hasText(resourceQuota.get().getSpec().get(COUNT_TOPICS.getKey())) ? 
- String.format(QUOTA_RESPONSE_FORMAT, currentCountTopic, resourceQuota.get().getSpec().get(COUNT_TOPICS.getKey())) : + public ResourceQuotaResponse formatUsedResourceByQuotaResponse(Namespace namespace, long currentCountTopic, + long currentCountPartition, long currentDiskTopic, + long currentCountConnector, + Optional resourceQuota) { + String countTopic = + resourceQuota.isPresent() && StringUtils.hasText(resourceQuota.get().getSpec().get(COUNT_TOPICS.getKey())) + ? String.format(QUOTA_RESPONSE_FORMAT, currentCountTopic, + resourceQuota.get().getSpec().get(COUNT_TOPICS.getKey())) : String.format(NO_QUOTA_RESPONSE_FORMAT, currentCountTopic); - String countPartition = resourceQuota.isPresent() && StringUtils.hasText(resourceQuota.get().getSpec().get(COUNT_PARTITIONS.getKey())) ? - String.format(QUOTA_RESPONSE_FORMAT, currentCountPartition, resourceQuota.get().getSpec().get(COUNT_PARTITIONS.getKey())) : - String.format(NO_QUOTA_RESPONSE_FORMAT, currentCountPartition); + String countPartition = resourceQuota.isPresent() + && StringUtils.hasText(resourceQuota.get().getSpec().get(COUNT_PARTITIONS.getKey())) + ? String.format(QUOTA_RESPONSE_FORMAT, currentCountPartition, + resourceQuota.get().getSpec().get(COUNT_PARTITIONS.getKey())) : + String.format(NO_QUOTA_RESPONSE_FORMAT, currentCountPartition); - String diskTopic = resourceQuota.isPresent() && StringUtils.hasText(resourceQuota.get().getSpec().get(DISK_TOPICS.getKey())) ? - String.format(QUOTA_RESPONSE_FORMAT, BytesUtils.bytesToHumanReadable(currentDiskTopic), resourceQuota.get().getSpec().get(DISK_TOPICS.getKey())) : + String diskTopic = + resourceQuota.isPresent() && StringUtils.hasText(resourceQuota.get().getSpec().get(DISK_TOPICS.getKey())) + ? String.format(QUOTA_RESPONSE_FORMAT, BytesUtils.bytesToHumanReadable(currentDiskTopic), + resourceQuota.get().getSpec().get(DISK_TOPICS.getKey())) : String.format(NO_QUOTA_RESPONSE_FORMAT, BytesUtils.bytesToHumanReadable(currentDiskTopic)); - String countConnector = resourceQuota.isPresent() && StringUtils.hasText(resourceQuota.get().getSpec().get(COUNT_CONNECTORS.getKey())) ? - String.format(QUOTA_RESPONSE_FORMAT, currentCountConnector, resourceQuota.get().getSpec().get(COUNT_CONNECTORS.getKey())) : - String.format(NO_QUOTA_RESPONSE_FORMAT, currentCountConnector); + String countConnector = resourceQuota.isPresent() + && StringUtils.hasText(resourceQuota.get().getSpec().get(COUNT_CONNECTORS.getKey())) + ? String.format(QUOTA_RESPONSE_FORMAT, currentCountConnector, + resourceQuota.get().getSpec().get(COUNT_CONNECTORS.getKey())) : + String.format(NO_QUOTA_RESPONSE_FORMAT, currentCountConnector); - String consumerByteRate = resourceQuota.isPresent() && StringUtils.hasText(resourceQuota.get().getSpec().get(USER_CONSUMER_BYTE_RATE.getKey())) ? - String.format(USER_QUOTA_RESPONSE_FORMAT, resourceQuota.get().getSpec().get(USER_CONSUMER_BYTE_RATE.getKey())) : - String.format(USER_QUOTA_RESPONSE_FORMAT, UserAsyncExecutor.BYTE_RATE_DEFAULT_VALUE); + String consumerByteRate = resourceQuota.isPresent() + && StringUtils.hasText(resourceQuota.get().getSpec().get(USER_CONSUMER_BYTE_RATE.getKey())) + ? String.format(USER_QUOTA_RESPONSE_FORMAT, + resourceQuota.get().getSpec().get(USER_CONSUMER_BYTE_RATE.getKey())) : + String.format(USER_QUOTA_RESPONSE_FORMAT, UserAsyncExecutor.BYTE_RATE_DEFAULT_VALUE); - String producerByteRate = resourceQuota.isPresent() && StringUtils.hasText(resourceQuota.get().getSpec().get(USER_PRODUCER_BYTE_RATE.getKey())) ? 
- String.format(USER_QUOTA_RESPONSE_FORMAT, resourceQuota.get().getSpec().get(USER_PRODUCER_BYTE_RATE.getKey())) : - String.format(USER_QUOTA_RESPONSE_FORMAT, UserAsyncExecutor.BYTE_RATE_DEFAULT_VALUE); + String producerByteRate = resourceQuota.isPresent() + && StringUtils.hasText(resourceQuota.get().getSpec().get(USER_PRODUCER_BYTE_RATE.getKey())) + ? String.format(USER_QUOTA_RESPONSE_FORMAT, + resourceQuota.get().getSpec().get(USER_PRODUCER_BYTE_RATE.getKey())) : + String.format(USER_QUOTA_RESPONSE_FORMAT, UserAsyncExecutor.BYTE_RATE_DEFAULT_VALUE); return ResourceQuotaResponse.builder() - .metadata(resourceQuota.map(ResourceQuota::getMetadata).orElse(ObjectMeta.builder() - .namespace(namespace.getMetadata().getName()) - .cluster(namespace.getMetadata().getCluster()) - .build())) - .spec(ResourceQuotaResponse.ResourceQuotaResponseSpec.builder() - .countTopic(countTopic) - .countPartition(countPartition) - .diskTopic(diskTopic) - .countConnector(countConnector) - .consumerByteRate(consumerByteRate) - .producerByteRate(producerByteRate) - .build()) - .build(); + .metadata(resourceQuota.map(ResourceQuota::getMetadata).orElse(ObjectMeta.builder() + .namespace(namespace.getMetadata().getName()) + .cluster(namespace.getMetadata().getCluster()) + .build())) + .spec(ResourceQuotaResponse.ResourceQuotaResponseSpec.builder() + .countTopic(countTopic) + .countPartition(countPartition) + .diskTopic(diskTopic) + .countConnector(countConnector) + .consumerByteRate(consumerByteRate) + .producerByteRate(producerByteRate) + .build()) + .build(); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/RoleBindingService.java b/src/main/java/com/michelin/ns4kafka/services/RoleBindingService.java index 9fe3c1c5..531cb85a 100644 --- a/src/main/java/com/michelin/ns4kafka/services/RoleBindingService.java +++ b/src/main/java/com/michelin/ns4kafka/services/RoleBindingService.java @@ -4,18 +4,21 @@ import com.michelin.ns4kafka.repositories.RoleBindingRepository; import jakarta.inject.Inject; import jakarta.inject.Singleton; - import java.util.Collection; import java.util.List; import java.util.Optional; +/** + * Service to manage role bindings. + */ @Singleton public class RoleBindingService { @Inject RoleBindingRepository roleBindingRepository; /** - * Delete a role binding + * Delete a role binding. + * * @param roleBinding The role binding to delete */ public void delete(RoleBinding roleBinding) { @@ -23,35 +26,45 @@ public void delete(RoleBinding roleBinding) { } /** - * Create a role binding + * Create a role binding. + * * @param roleBinding The role binding to create */ - public void create(RoleBinding roleBinding) { roleBindingRepository.create(roleBinding); } + public void create(RoleBinding roleBinding) { + roleBindingRepository.create(roleBinding); + } /** - * List role bindings by namespace + * List role bindings by namespace. + * * @param namespace The namespace used to research * @return The list of associated role bindings */ - public List list(String namespace) { return roleBindingRepository.findAllForNamespace(namespace); } + public List list(String namespace) { + return roleBindingRepository.findAllForNamespace(namespace); + } /** - * List role bindings by groups + * List role bindings by groups. 
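// The six near-identical "used/limit" ternaries above could arguably be
// factored into one helper; this is an editorial sketch of that refactoring,
// not code from the PR. It assumes QUOTA_RESPONSE_FORMAT is "%s/%s" and uses
// an illustrative quota key.
import java.util.Map;
import java.util.Optional;

class QuotaResponseFormatSketch {
    // Returns "used/limit" when a limit is set, or just "used" otherwise.
    static String formatQuota(Optional<Map<String, String>> quotaSpec, String key, String currentValue) {
        return quotaSpec
            .map(spec -> spec.get(key))   // a missing key yields null, hence an empty Optional
            .filter(limit -> !limit.isBlank())
            .map(limit -> String.format("%s/%s", currentValue, limit))
            .orElse(currentValue);
    }

    public static void main(String[] args) {
        Optional<Map<String, String>> spec = Optional.of(Map.of("count/topics", "20"));
        System.out.println(formatQuota(spec, "count/topics", "12"));     // 12/20
        System.out.println(formatQuota(spec, "count/partitions", "34")); // 34
    }
}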
+ * * @param groups The groups used to research * @return The list of associated role bindings */ - public List listByGroups(Collection groups) { return roleBindingRepository.findAllForGroups(groups); } + public List listByGroups(Collection groups) { + return roleBindingRepository.findAllForGroups(groups); + } /** - * Find a role binding by name + * Find a role binding by name. + * * @param namespace The namespace used to research - * @param name The role binding name + * @param name The role binding name * @return The researched role binding */ public Optional findByName(String namespace, String name) { return list(namespace) - .stream() - .filter(t -> t.getMetadata().getName().equals(name)) - .findFirst(); + .stream() + .filter(t -> t.getMetadata().getName().equals(name)) + .findFirst(); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/SchemaService.java b/src/main/java/com/michelin/ns4kafka/services/SchemaService.java index 1fc94645..f829148b 100644 --- a/src/main/java/com/michelin/ns4kafka/services/SchemaService.java +++ b/src/main/java/com/michelin/ns4kafka/services/SchemaService.java @@ -12,13 +12,15 @@ import com.michelin.ns4kafka.services.clients.schema.entities.SchemaResponse; import jakarta.inject.Inject; import jakarta.inject.Singleton; +import java.util.List; +import java.util.Optional; import lombok.extern.slf4j.Slf4j; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; -import java.util.List; -import java.util.Optional; - +/** + * Service to manage schemas. + */ @Slf4j @Singleton public class SchemaService { @@ -29,158 +31,164 @@ public class SchemaService { SchemaRegistryClient schemaRegistryClient; /** - * Get all the schemas by namespace + * Get all the schemas by namespace. + * * @param namespace The namespace * @return A list of schemas */ public Flux findAllForNamespace(Namespace namespace) { List acls = accessControlEntryService.findAllGrantedToNamespace(namespace).stream() - .filter(acl -> acl.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) - .filter(acl -> acl.getSpec().getResourceType() == AccessControlEntry.ResourceType.TOPIC).toList(); + .filter(acl -> acl.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) + .filter(acl -> acl.getSpec().getResourceType() == AccessControlEntry.ResourceType.TOPIC).toList(); return schemaRegistryClient - .getSubjects(namespace.getMetadata().getCluster()) - .filter(subject -> { - String underlyingTopicName = subject.replaceAll("(-key|-value)$",""); - - return acls.stream().anyMatch(accessControlEntry -> switch (accessControlEntry.getSpec().getResourcePatternType()) { - case PREFIXED -> - underlyingTopicName.startsWith(accessControlEntry.getSpec().getResource()); - case LITERAL -> - underlyingTopicName.equals(accessControlEntry.getSpec().getResource()); + .getSubjects(namespace.getMetadata().getCluster()) + .filter(subject -> { + String underlyingTopicName = subject.replaceAll("(-key|-value)$", ""); + + return acls.stream() + .anyMatch(accessControlEntry -> switch (accessControlEntry.getSpec().getResourcePatternType()) { + case PREFIXED -> underlyingTopicName.startsWith(accessControlEntry.getSpec().getResource()); + case LITERAL -> underlyingTopicName.equals(accessControlEntry.getSpec().getResource()); }); - }) - .map(subject -> SchemaList.builder() - .metadata(ObjectMeta.builder() - .cluster(namespace.getMetadata().getCluster()) - .namespace(namespace.getMetadata().getName()) - .name(subject) - .build()) - .build()); + }) + .map(subject -> SchemaList.builder() + 
.metadata(ObjectMeta.builder() + .cluster(namespace.getMetadata().getCluster()) + .namespace(namespace.getMetadata().getName()) + .name(subject) + .build()) + .build()); } /** - * Get the last version of a schema by namespace and subject + * Get the last version of a schema by namespace and subject. * * @param namespace The namespace - * @param subject The subject + * @param subject The subject * @return A schema */ public Mono getLatestSubject(Namespace namespace, String subject) { return schemaRegistryClient - .getLatestSubject(namespace.getMetadata().getCluster(), subject) - .flatMap(latestSubjectOptional -> schemaRegistryClient - .getCurrentCompatibilityBySubject(namespace.getMetadata().getCluster(), subject) - .map(Optional::of) - .defaultIfEmpty(Optional.empty()) - .map(currentCompatibilityOptional -> { - Schema.Compatibility compatibility = currentCompatibilityOptional.isPresent() ? currentCompatibilityOptional.get().compatibilityLevel() : Schema.Compatibility.GLOBAL; + .getLatestSubject(namespace.getMetadata().getCluster(), subject) + .flatMap(latestSubjectOptional -> schemaRegistryClient + .getCurrentCompatibilityBySubject(namespace.getMetadata().getCluster(), subject) + .map(Optional::of) + .defaultIfEmpty(Optional.empty()) + .map(currentCompatibilityOptional -> { + Schema.Compatibility compatibility = currentCompatibilityOptional.isPresent() + ? currentCompatibilityOptional.get().compatibilityLevel() : Schema.Compatibility.GLOBAL; - return Schema.builder() - .metadata(ObjectMeta.builder() - .cluster(namespace.getMetadata().getCluster()) - .namespace(namespace.getMetadata().getName()) - .name(latestSubjectOptional.subject()) - .build()) - .spec(Schema.SchemaSpec.builder() - .id(latestSubjectOptional.id()) - .version(latestSubjectOptional.version()) - .compatibility(compatibility) - .schema(latestSubjectOptional.schema()) - .schemaType(latestSubjectOptional.schemaType() == null ? Schema.SchemaType.AVRO : - Schema.SchemaType.valueOf(latestSubjectOptional.schemaType())) - .build()) - .build(); - })); + return Schema.builder() + .metadata(ObjectMeta.builder() + .cluster(namespace.getMetadata().getCluster()) + .namespace(namespace.getMetadata().getName()) + .name(latestSubjectOptional.subject()) + .build()) + .spec(Schema.SchemaSpec.builder() + .id(latestSubjectOptional.id()) + .version(latestSubjectOptional.version()) + .compatibility(compatibility) + .schema(latestSubjectOptional.schema()) + .schemaType(latestSubjectOptional.schemaType() == null ? Schema.SchemaType.AVRO : + Schema.SchemaType.valueOf(latestSubjectOptional.schemaType())) + .build()) + .build(); + })); } /** - * Publish a schema + * Publish a schema. * * @param namespace The namespace - * @param schema The schema to create + * @param schema The schema to create * @return The ID of the created schema */ public Mono register(Namespace namespace, Schema schema) { - return schemaRegistryClient. 
- register(namespace.getMetadata().getCluster(), - schema.getMetadata().getName(), SchemaRequest.builder() - .schemaType(String.valueOf(schema.getSpec().getSchemaType())) - .schema(schema.getSpec().getSchema()) - .references(schema.getSpec().getReferences()) - .build()) - .map(SchemaResponse::id); + return schemaRegistryClient + .register(namespace.getMetadata().getCluster(), + schema.getMetadata().getName(), SchemaRequest.builder() + .schemaType(String.valueOf(schema.getSpec().getSchemaType())) + .schema(schema.getSpec().getSchema()) + .references(schema.getSpec().getReferences()) + .build()) + .map(SchemaResponse::id); } /** - * Delete all schemas under the given subject + * Delete all schemas under the given subject. + * * @param namespace The current namespace - * @param subject The current subject to delete + * @param subject The current subject to delete * @return The list of deleted versions */ public Mono deleteSubject(Namespace namespace, String subject) { return schemaRegistryClient - .deleteSubject(namespace.getMetadata().getCluster(), subject, false) - .flatMap(ids -> schemaRegistryClient. - deleteSubject(namespace.getMetadata().getCluster(), - subject, true)); + .deleteSubject(namespace.getMetadata().getCluster(), subject, false) + .flatMap(ids -> schemaRegistryClient + .deleteSubject(namespace.getMetadata().getCluster(), + subject, true)); } /** - * Validate the schema compatibility + * Validate the schema compatibility. * * @param cluster The cluster - * @param schema The schema to validate + * @param schema The schema to validate * @return A list of errors */ public Mono> validateSchemaCompatibility(String cluster, Schema schema) { - return schemaRegistryClient.validateSchemaCompatibility(cluster, schema.getMetadata().getName(), SchemaRequest.builder() - .schemaType(String.valueOf(schema.getSpec().getSchemaType())) - .schema(schema.getSpec().getSchema()) - .references(schema.getSpec().getReferences()) - .build()) - .map(Optional::of) - .defaultIfEmpty(Optional.empty()) - .map(schemaCompatibilityCheckOptional -> { - if (schemaCompatibilityCheckOptional.isEmpty()) { - return List.of(); - } + return schemaRegistryClient.validateSchemaCompatibility(cluster, schema.getMetadata().getName(), + SchemaRequest.builder() + .schemaType(String.valueOf(schema.getSpec().getSchemaType())) + .schema(schema.getSpec().getSchema()) + .references(schema.getSpec().getReferences()) + .build()) + .map(Optional::of) + .defaultIfEmpty(Optional.empty()) + .map(schemaCompatibilityCheckOptional -> { + if (schemaCompatibilityCheckOptional.isEmpty()) { + return List.of(); + } - if (!schemaCompatibilityCheckOptional.get().isCompatible()) { - return schemaCompatibilityCheckOptional.get().messages(); - } + if (!schemaCompatibilityCheckOptional.get().isCompatible()) { + return schemaCompatibilityCheckOptional.get().messages(); + } - return List.of(); - }); + return List.of(); + }); } /** - * Update the compatibility of a subject + * Update the compatibility of a subject. 
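// Editorial sketch of the Mono pattern used by getLatestSubject and
// validateSchemaCompatibility above: wrapping a possibly-empty upstream in an
// Optional lets downstream code distinguish "no answer" from an answer and
// fall back to a default. Requires reactor-core; the values are illustrative.
import java.util.Optional;
import reactor.core.publisher.Mono;

class OptionalMonoSketch {
    static Mono<String> compatibilityOrGlobal(Mono<String> remoteCompatibility) {
        return remoteCompatibility
            .map(Optional::of)
            .defaultIfEmpty(Optional.empty())
            // An empty upstream means the subject has no override, so fall back to GLOBAL.
            .map(maybe -> maybe.orElse("GLOBAL"));
    }

    public static void main(String[] args) {
        System.out.println(compatibilityOrGlobal(Mono.just("BACKWARD")).block()); // BACKWARD
        System.out.println(compatibilityOrGlobal(Mono.empty()).block());          // GLOBAL
    }
}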
* - * @param namespace The namespace - * @param schema The schema + * @param namespace The namespace + * @param schema The schema * @param compatibility The compatibility to apply */ - public Mono updateSubjectCompatibility(Namespace namespace, Schema schema, Schema.Compatibility compatibility) { + public Mono updateSubjectCompatibility(Namespace namespace, Schema schema, + Schema.Compatibility compatibility) { if (compatibility.equals(Schema.Compatibility.GLOBAL)) { - return schemaRegistryClient.deleteCurrentCompatibilityBySubject(namespace.getMetadata().getCluster(), schema.getMetadata().getName()); + return schemaRegistryClient.deleteCurrentCompatibilityBySubject(namespace.getMetadata().getCluster(), + schema.getMetadata().getName()); } else { return schemaRegistryClient.updateSubjectCompatibility(namespace.getMetadata().getCluster(), - schema.getMetadata().getName(), SchemaCompatibilityRequest.builder() - .compatibility(compatibility.toString()).build()); + schema.getMetadata().getName(), SchemaCompatibilityRequest.builder() + .compatibility(compatibility.toString()).build()); } } /** - * Does the namespace is owner of the given schema + * Check whether the namespace is the owner of the given subject. * - * @param namespace The namespace + * @param namespace The namespace * @param subjectName The name of the subject * @return true if it's owner, false otherwise */ public boolean isNamespaceOwnerOfSubject(Namespace namespace, String subjectName) { - String underlyingTopicName = subjectName.replaceAll("(-key|-value)$",""); - return accessControlEntryService.isNamespaceOwnerOfResource(namespace.getMetadata().getName(), AccessControlEntry.ResourceType.TOPIC, - underlyingTopicName); + String underlyingTopicName = subjectName.replaceAll("(-key|-value)$", ""); + return accessControlEntryService.isNamespaceOwnerOfResource(namespace.getMetadata().getName(), + AccessControlEntry.ResourceType.TOPIC, + underlyingTopicName); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/StreamService.java b/src/main/java/com/michelin/ns4kafka/services/StreamService.java index fdaecb6f..b115147f 100644 --- a/src/main/java/com/michelin/ns4kafka/services/StreamService.java +++ b/src/main/java/com/michelin/ns4kafka/services/StreamService.java @@ -9,11 +9,13 @@ import io.micronaut.inject.qualifiers.Qualifiers; import jakarta.inject.Inject; import jakarta.inject.Singleton; - +import java.util.HashSet; import java.util.List; import java.util.Optional; -import java.util.stream.Collectors; +/** + * Service to manage Kafka Streams. + */ @Singleton public class StreamService { @Inject @@ -26,20 +28,22 @@ public class StreamService { ApplicationContext applicationContext; /** - * Find all Kafka Streams by given namespace + * Find all Kafka Streams by given namespace. + * * @param namespace The namespace * @return A list of Kafka Streams */ public List findAllForNamespace(Namespace namespace) { return streamRepository.findAllForCluster(namespace.getMetadata().getCluster()).stream() .filter(stream -> stream.getMetadata().getNamespace().equals(namespace.getMetadata().getName())) - .toList(); + .toList(); } /** - * Find a Kafka Streams by namespace and name + * Find a Kafka Streams by namespace and name.
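// Editorial sketch of the subject-to-topic ownership rule used by
// isNamespaceOwnerOfSubject and findAllForNamespace above: subjects follow the
// TopicNameStrategy ("<topic>-key"/"<topic>-value"), so ownership of the
// underlying topic is checked against OWNER ACLs. The Acl record below is a
// simplification of the Ns4kafka model.
import java.util.List;

class SubjectOwnershipSketch {
    record Acl(String resource, boolean prefixed) {}

    static boolean ownsSubject(List<Acl> ownerTopicAcls, String subject) {
        // Strip the TopicNameStrategy suffix to recover the topic name.
        String underlyingTopic = subject.replaceAll("(-key|-value)$", "");
        return ownerTopicAcls.stream().anyMatch(acl -> acl.prefixed()
            ? underlyingTopic.startsWith(acl.resource())
            : underlyingTopic.equals(acl.resource()));
    }

    public static void main(String[] args) {
        List<Acl> acls = List.of(new Acl("orders-", true));
        System.out.println(ownsSubject(acls, "orders-eu-value")); // true
        System.out.println(ownsSubject(acls, "payments-value"));  // false
    }
}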
+ * * @param namespace The namespace - * @param stream The Kafka Streams name + * @param stream The Kafka Streams name * @return An optional Kafka Streams */ public Optional findByName(Namespace namespace, String stream) { @@ -49,26 +53,30 @@ public Optional findByName(Namespace namespace, String stream) { } /** - * Is given namespace owner of the given Kafka Streams + * Is given namespace owner of the given Kafka Streams. + * * @param namespace The namespace - * @param resource The Kafka Streams + * @param resource The Kafka Streams * @return true if it is, false otherwise */ public boolean isNamespaceOwnerOfKafkaStream(Namespace namespace, String resource) { // KafkaStream Ownership is determined by both Topic and Group ownership on PREFIXED resource, // this is because KafkaStream application.id is a consumer group but also a prefix for internal topic names - return accessControlEntryService.findAllGrantedToNamespace(namespace) - .stream() - .filter(accessControlEntry -> accessControlEntry.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) - .filter(accessControlEntry -> accessControlEntry.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.PREFIXED) - .filter(accessControlEntry -> resource.startsWith(accessControlEntry.getSpec().getResource())) - .map(accessControlEntry -> accessControlEntry.getSpec().getResourceType()) - .collect(Collectors.toList()) - .containsAll(List.of(AccessControlEntry.ResourceType.TOPIC, AccessControlEntry.ResourceType.GROUP)); + return new HashSet<>(accessControlEntryService.findAllGrantedToNamespace(namespace) + .stream() + .filter(accessControlEntry -> accessControlEntry.getSpec().getPermission() + == AccessControlEntry.Permission.OWNER) + .filter(accessControlEntry -> accessControlEntry.getSpec().getResourcePatternType() + == AccessControlEntry.ResourcePatternType.PREFIXED) + .filter(accessControlEntry -> resource.startsWith(accessControlEntry.getSpec().getResource())) + .map(accessControlEntry -> accessControlEntry.getSpec().getResourceType()) + .toList()) + .containsAll(List.of(AccessControlEntry.ResourceType.TOPIC, AccessControlEntry.ResourceType.GROUP)); } /** - * Create a given Kafka Stream + * Create a given Kafka Stream. + * * @param stream The Kafka Stream to create * @return The created Kafka Stream */ @@ -77,11 +85,13 @@ public KafkaStream create(KafkaStream stream) { } /** - * Delete a given Kafka Stream + * Delete a given Kafka Stream. 
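// Editorial sketch of the ownership rule explained in the comment above: a
// namespace owns a Kafka Streams application only when a matching PREFIXED
// OWNER ACL exists for both TOPIC and GROUP, since application.id is at once a
// consumer group and a prefix for internal topic names. Copying the matches
// into a HashSet, as the PR does, keeps containsAll linear.
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class StreamOwnershipSketch {
    enum ResourceType { TOPIC, GROUP }

    static boolean ownsKafkaStream(List<ResourceType> matchingPrefixedOwnerAclTypes) {
        Set<ResourceType> granted = new HashSet<>(matchingPrefixedOwnerAclTypes);
        return granted.containsAll(List.of(ResourceType.TOPIC, ResourceType.GROUP));
    }

    public static void main(String[] args) {
        System.out.println(ownsKafkaStream(List.of(ResourceType.TOPIC, ResourceType.GROUP))); // true
        System.out.println(ownsKafkaStream(List.of(ResourceType.TOPIC)));                     // false
    }
}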
+ * * @param stream The Kafka Stream */ public void delete(Namespace namespace, KafkaStream stream) { - AccessControlEntryAsyncExecutor accessControlEntryAsyncExecutor = applicationContext.getBean(AccessControlEntryAsyncExecutor.class, + AccessControlEntryAsyncExecutor accessControlEntryAsyncExecutor = + applicationContext.getBean(AccessControlEntryAsyncExecutor.class, Qualifiers.byName(stream.getMetadata().getCluster())); accessControlEntryAsyncExecutor.deleteKafkaStreams(namespace, stream); diff --git a/src/main/java/com/michelin/ns4kafka/services/TopicService.java b/src/main/java/com/michelin/ns4kafka/services/TopicService.java index 0a438288..0660093b 100644 --- a/src/main/java/com/michelin/ns4kafka/services/TopicService.java +++ b/src/main/java/com/michelin/ns4kafka/services/TopicService.java @@ -1,25 +1,33 @@ package com.michelin.ns4kafka.services; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_COMPACT; +import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_CONFIG; +import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_DELETE; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Topic; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.repositories.TopicRepository; import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; import io.micronaut.context.ApplicationContext; import io.micronaut.inject.qualifiers.Qualifiers; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import org.apache.kafka.clients.admin.RecordsToDelete; -import org.apache.kafka.common.TopicPartition; - -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; +import org.apache.kafka.clients.admin.RecordsToDelete; +import org.apache.kafka.common.TopicPartition; -import static org.apache.kafka.common.config.TopicConfig.*; - +/** + * Service to manage topics. + */ @Singleton public class TopicService { @Inject @@ -32,10 +40,11 @@ public class TopicService { ApplicationContext applicationContext; @Inject - List kafkaAsyncExecutorConfig; + List managedClusterProperties; /** - * Find all topics + * Find all topics. + * * @return The list of topics */ public List findAll() { @@ -43,57 +52,62 @@ public List findAll() { } /** - * Find all topics by given namespace + * Find all topics by given namespace. 
+ * * @param namespace The namespace * @return A list of topics */ public List findAllForNamespace(Namespace namespace) { List acls = accessControlEntryService.findAllGrantedToNamespace(namespace); return topicRepository.findAllForCluster(namespace.getMetadata().getCluster()) - .stream() - .filter(topic -> acls.stream().anyMatch(accessControlEntry -> { - //need to check accessControlEntry.Permission, we want OWNER - if (accessControlEntry.getSpec().getPermission() != AccessControlEntry.Permission.OWNER) { - return false; - } - if (accessControlEntry.getSpec().getResourceType() == AccessControlEntry.ResourceType.TOPIC) { - switch (accessControlEntry.getSpec().getResourcePatternType()) { - case PREFIXED: - return topic.getMetadata().getName().startsWith(accessControlEntry.getSpec().getResource()); - case LITERAL: - return topic.getMetadata().getName().equals(accessControlEntry.getSpec().getResource()); - } - } + .stream() + .filter(topic -> acls.stream().anyMatch(accessControlEntry -> { + //need to check accessControlEntry.Permission, we want OWNER + if (accessControlEntry.getSpec().getPermission() != AccessControlEntry.Permission.OWNER) { return false; - })) - .toList(); + } + if (accessControlEntry.getSpec().getResourceType() == AccessControlEntry.ResourceType.TOPIC) { + return switch (accessControlEntry.getSpec().getResourcePatternType()) { + case PREFIXED -> + topic.getMetadata().getName().startsWith(accessControlEntry.getSpec().getResource()); + case LITERAL -> + topic.getMetadata().getName().equals(accessControlEntry.getSpec().getResource()); + }; + } + return false; + })) + .toList(); } /** - * Find a topic by namespace and name + * Find a topic by namespace and name. + * * @param namespace The namespace - * @param topic The topic name + * @param topic The topic name * @return An optional topic */ public Optional findByName(Namespace namespace, String topic) { return findAllForNamespace(namespace) - .stream() - .filter(t -> t.getMetadata().getName().equals(topic)) - .findFirst(); + .stream() + .filter(t -> t.getMetadata().getName().equals(topic)) + .findFirst(); } /** - * Is given namespace owner of the given topic + * Is given namespace owner of the given topic. + * * @param namespace The namespace - * @param topic The topic + * @param topic The topic * @return true if it is, false otherwise */ public boolean isNamespaceOwnerOfTopic(String namespace, String topic) { - return accessControlEntryService.isNamespaceOwnerOfResource(namespace, AccessControlEntry.ResourceType.TOPIC, topic); + return accessControlEntryService.isNamespaceOwnerOfResource(namespace, AccessControlEntry.ResourceType.TOPIC, + topic); } /** - * Create a given topic + * Create a given topic. + * * @param topic The topic to create * @return The created topic */ @@ -102,38 +116,41 @@ public Topic create(Topic topic) { } /** - * Delete a given topic + * Delete a given topic. + * * @param topic The topic */ public void delete(Topic topic) throws InterruptedException, ExecutionException, TimeoutException { TopicAsyncExecutor topicAsyncExecutor = applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(topic.getMetadata().getCluster())); + Qualifiers.byName(topic.getMetadata().getCluster())); topicAsyncExecutor.deleteTopic(topic); topicRepository.delete(topic); } /** - * List all topics colliding with existing topics on broker but not in ns4kafka + * List all topics colliding with existing topics on broker but not in Ns4Kafka. 
+ * * @param namespace The namespace - * @param topic The topic + * @param topic The topic * @return The list of colliding topics - * @throws ExecutionException Any execution exception + * @throws ExecutionException Any execution exception * @throws InterruptedException Any interrupted exception - * @throws TimeoutException Any timeout exception + * @throws TimeoutException Any timeout exception */ - public List findCollidingTopics(Namespace namespace, Topic topic) throws InterruptedException, ExecutionException, TimeoutException { + public List findCollidingTopics(Namespace namespace, Topic topic) + throws InterruptedException, ExecutionException, TimeoutException { TopicAsyncExecutor topicAsyncExecutor = applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(namespace.getMetadata().getCluster())); + Qualifiers.byName(namespace.getMetadata().getCluster())); try { List clusterTopics = topicAsyncExecutor.listBrokerTopicNames(); return clusterTopics.stream() - // existing topics with the exact same name (and not currently in ns4kafka) should not interfere - // this topic could be created on ns4kafka during "import" step - .filter(clusterTopic -> !topic.getMetadata().getName().equals(clusterTopic)) - .filter(clusterTopic -> hasCollision(clusterTopic, topic.getMetadata().getName())) - .toList(); + // existing topics with the exact same name (and not currently in Ns4Kafka) should not interfere + // this topic could be created on Ns4Kafka during "import" step + .filter(clusterTopic -> !topic.getMetadata().getName().equals(clusterTopic)) + .filter(clusterTopic -> hasCollision(clusterTopic, topic.getMetadata().getName())) + .toList(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new InterruptedException(e.getMessage()); @@ -141,41 +158,50 @@ public List findCollidingTopics(Namespace namespace, Topic topic) throws } /** - * Validate existing topic can be updated with new given configs + * Validate existing topic can be updated with new given configs. 
+ * * @param existingTopic The existing topic - * @param newTopic The new topic + * @param newTopic The new topic * @return A list of validation errors */ public List validateTopicUpdate(Namespace namespace, Topic existingTopic, Topic newTopic) { List validationErrors = new ArrayList<>(); if (existingTopic.getSpec().getPartitions() != newTopic.getSpec().getPartitions()) { - validationErrors.add(String.format("Invalid value %s for configuration partitions: Value is immutable (%s).", + validationErrors.add( + String.format("Invalid value %s for configuration partitions: Value is immutable (%s).", newTopic.getSpec().getPartitions(), existingTopic.getSpec().getPartitions())); } if (existingTopic.getSpec().getReplicationFactor() != newTopic.getSpec().getReplicationFactor()) { - validationErrors.add(String.format("Invalid value %s for configuration replication.factor: Value is immutable (%s).", + validationErrors.add( + String.format("Invalid value %s for configuration replication.factor: Value is immutable (%s).", newTopic.getSpec().getReplicationFactor(), existingTopic.getSpec().getReplicationFactor())); } - Optional topicCluster = kafkaAsyncExecutorConfig - .stream() - .filter(cluster -> namespace.getMetadata().getCluster().equals(cluster.getName())) - .findFirst(); - - boolean confluentCloudCluster = topicCluster.isPresent() && topicCluster.get().getProvider().equals(KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD); - if (confluentCloudCluster && existingTopic.getSpec().getConfigs().get(CLEANUP_POLICY_CONFIG).equals(CLEANUP_POLICY_DELETE) && - newTopic.getSpec().getConfigs().get(CLEANUP_POLICY_CONFIG).equals(CLEANUP_POLICY_COMPACT)) { - validationErrors.add(String.format("Invalid value %s for configuration cleanup.policy: Altering topic configuration from `delete` to `compact` is not currently supported. Please create a new topic with `compact` policy specified instead.", - newTopic.getSpec().getConfigs().get(CLEANUP_POLICY_CONFIG))); + Optional topicCluster = managedClusterProperties + .stream() + .filter(cluster -> namespace.getMetadata().getCluster().equals(cluster.getName())) + .findFirst(); + + boolean confluentCloudCluster = topicCluster.isPresent() && topicCluster.get().getProvider().equals( + ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD); + if (confluentCloudCluster + && existingTopic.getSpec().getConfigs().get(CLEANUP_POLICY_CONFIG).equals(CLEANUP_POLICY_DELETE) + && newTopic.getSpec().getConfigs().get(CLEANUP_POLICY_CONFIG).equals(CLEANUP_POLICY_COMPACT)) { + validationErrors.add(String.format( + "Invalid value %s for configuration cleanup.policy: Altering topic configuration " + + "from `delete` to `compact` is not currently supported. " + + "Please create a new topic with `compact` policy specified instead.", + newTopic.getSpec().getConfigs().get(CLEANUP_POLICY_CONFIG))); } return validationErrors; } /** - * Check if topics collide with "_" instead of "." + * Check if topics collide with "_" instead of ".". + * * @param topicA The first topic * @param topicB The second topic * @return true if it does, false otherwise @@ -185,50 +211,55 @@ private boolean hasCollision(String topicA, String topicB) { } /** - * List the topics that are not synchronized to ns4kafka by namespace + * List the topics that are not synchronized to Ns4Kafka by namespace. 
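// Editorial sketch of the rule behind hasCollision above: Kafka replaces '.'
// with '_' in metric names, so topics whose names differ only by those two
// characters collide. The one-line implementation below is an assumption
// consistent with that rule, not the method body from the PR (which this diff
// elides).
class TopicCollisionSketch {
    static boolean hasCollision(String topicA, String topicB) {
        return topicA.replace('.', '_').equals(topicB.replace('.', '_'));
    }

    public static void main(String[] args) {
        System.out.println(hasCollision("my.topic", "my_topic"));  // true
        System.out.println(hasCollision("my.topic", "my.topic2")); // false
    }
}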
+ *  * @param namespace The namespace * @return The list of topics - * @throws ExecutionException Any execution exception + * @throws ExecutionException Any execution exception * @throws InterruptedException Any interrupted exception - * @throws TimeoutException Any timeout exception + * @throws TimeoutException Any timeout exception */ - public List listUnsynchronizedTopics(Namespace namespace) throws ExecutionException, InterruptedException, TimeoutException { + public List listUnsynchronizedTopics(Namespace namespace) + throws ExecutionException, InterruptedException, TimeoutException { TopicAsyncExecutor topicAsyncExecutor = applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(namespace.getMetadata().getCluster())); + Qualifiers.byName(namespace.getMetadata().getCluster())); // List topics for this namespace List topicNames = listUnsynchronizedTopicNames(namespace); // Get topics definitions Collection unsynchronizedTopics = topicAsyncExecutor.collectBrokerTopicsFromNames(topicNames) - .values(); + .values(); return new ArrayList<>(unsynchronizedTopics); } /** - * List the topic names that are not synchronized to ns4kafka by namespace + * List the topic names that are not synchronized to Ns4Kafka by namespace. + * * @param namespace The namespace * @return The list of topic names - * @throws ExecutionException Any execution exception + * @throws ExecutionException Any execution exception * @throws InterruptedException Any interrupted exception - * @throws TimeoutException Any timeout exception + * @throws TimeoutException Any timeout exception */ - public List listUnsynchronizedTopicNames(Namespace namespace) throws ExecutionException, InterruptedException, TimeoutException { + public List listUnsynchronizedTopicNames(Namespace namespace) + throws ExecutionException, InterruptedException, TimeoutException { TopicAsyncExecutor topicAsyncExecutor = applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(namespace.getMetadata().getCluster())); + Qualifiers.byName(namespace.getMetadata().getCluster())); return topicAsyncExecutor.listBrokerTopicNames() - .stream() - // ...that belongs to this namespace - .filter(topic -> isNamespaceOwnerOfTopic(namespace.getMetadata().getName(), topic)) - // ...and aren't in ns4kafka storage - .filter(topic -> findByName(namespace, topic).isEmpty()) - .toList(); + .stream() + // ...that belong to this namespace + .filter(topic -> isNamespaceOwnerOfTopic(namespace.getMetadata().getName(), topic)) + // ...and aren't in Ns4Kafka storage + .filter(topic -> findByName(namespace, topic).isEmpty()) + .toList(); } /** - * Validate if a topic can be eligible for records deletion + * Validate whether a topic is eligible for records deletion. + * * @param deleteRecordsTopic The topic to delete records * @return A list of errors */ @@ -244,20 +275,22 @@ public List validateDeleteRecordsTopic(Topic deleteRecordsTopic) { /** * For a given topic, get each latest offset by partition in order to delete all the records - * before these offsets + * before these offsets.
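// Editorial sketch of what prepareRecordsToDelete and deleteRecords above
// ultimately drive through the Kafka Admin API: truncating each partition
// before a given offset. The executor and cluster wiring from the PR are
// replaced by a plain Admin client; the broker address, topic and offset are
// placeholders.
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

class DeleteRecordsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            Map<TopicPartition, RecordsToDelete> recordsToDelete =
                Map.of(new TopicPartition("my-topic", 0), RecordsToDelete.beforeOffset(42L));
            // Blocks until the broker acknowledges truncation of partition 0 before offset 42.
            admin.deleteRecords(recordsToDelete).all().get();
        }
    }
}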
+ * * @param topic The topic to delete records * @return A map of offsets by topic-partitions - * @throws ExecutionException Any execution exception + * @throws ExecutionException Any execution exception * @throws InterruptedException Any interrupted exception */ - public Map prepareRecordsToDelete(Topic topic) throws ExecutionException, InterruptedException { + public Map prepareRecordsToDelete(Topic topic) + throws ExecutionException, InterruptedException { TopicAsyncExecutor topicAsyncExecutor = applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(topic.getMetadata().getCluster())); + Qualifiers.byName(topic.getMetadata().getCluster())); try { return topicAsyncExecutor.prepareRecordsToDelete(topic.getMetadata().getName()) - .entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, kv -> kv.getValue().beforeOffset())); + .entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, kv -> kv.getValue().beforeOffset())); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new InterruptedException(e.getMessage()); @@ -265,18 +298,20 @@ public Map prepareRecordsToDelete(Topic topic) throws Exec } /** - * Delete the records for each partition, before each offset + * Delete the records for each partition, before each offset. + * * @param recordsToDelete The offsets by topic-partitions * @return The new offsets by topic-partitions * @throws InterruptedException Any interrupted exception */ - public Map deleteRecords(Topic topic, Map recordsToDelete) throws InterruptedException { + public Map deleteRecords(Topic topic, Map recordsToDelete) + throws InterruptedException { TopicAsyncExecutor topicAsyncExecutor = applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(topic.getMetadata().getCluster())); + Qualifiers.byName(topic.getMetadata().getCluster())); try { Map recordsToDeleteMap = recordsToDelete.entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, kv -> RecordsToDelete.beforeOffset(kv.getValue()))); + .collect(Collectors.toMap(Map.Entry::getKey, kv -> RecordsToDelete.beforeOffset(kv.getValue()))); return topicAsyncExecutor.deleteRecords(recordsToDeleteMap); } catch (InterruptedException e) { diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/KafkaConnectClient.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/KafkaConnectClient.java index 176e5e7d..251b3812 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/KafkaConnectClient.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/KafkaConnectClient.java @@ -1,10 +1,16 @@ package com.michelin.ns4kafka.services.clients.connect; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.config.SecurityConfig; import com.michelin.ns4kafka.models.connect.cluster.ConnectCluster; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; +import com.michelin.ns4kafka.properties.SecurityProperties; import com.michelin.ns4kafka.repositories.ConnectClusterRepository; -import com.michelin.ns4kafka.services.clients.connect.entities.*; +import com.michelin.ns4kafka.services.clients.connect.entities.ConfigInfos; +import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorInfo; +import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorPluginInfo; +import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorSpecs; +import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorStateInfo; +import 
com.michelin.ns4kafka.services.clients.connect.entities.ConnectorStatus; +import com.michelin.ns4kafka.services.clients.connect.entities.ServerInfo; import com.michelin.ns4kafka.utils.EncryptionUtils; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.core.type.Argument; @@ -15,216 +21,248 @@ import io.micronaut.http.client.annotation.Client; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.Builder; -import lombok.Getter; -import lombok.extern.slf4j.Slf4j; -import reactor.core.publisher.Mono; - import java.net.URI; import java.util.List; import java.util.Map; import java.util.Optional; +import lombok.Builder; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import reactor.core.publisher.Mono; +/** + * Kafka Connect client. + */ @Slf4j @Singleton public class KafkaConnectClient { private static final String CONNECTORS = "/connectors/"; - + @Inject + ConnectClusterRepository connectClusterRepository; @Inject @Client(id = "kafka-connect") private HttpClient httpClient; - @Inject - private List kafkaAsyncExecutorConfigs; - + private List managedClusterProperties; @Inject - ConnectClusterRepository connectClusterRepository; - - @Inject - private SecurityConfig securityConfig; + private SecurityProperties securityProperties; /** - * Get the Kafka connect version - * @param kafkaCluster The Kafka cluster + * Get the Kafka connect version. + * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect * @return The version */ public Mono> version(String kafkaCluster, String connectCluster) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/"))) - .basicAuth(config.getUsername(), config.getPassword()); + .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.exchange(request, ServerInfo.class)); } /** - * List all connectors - * @param kafkaCluster The Kafka cluster + * List all connectors. + * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect * @return The connectors */ public Mono> listAll(String kafkaCluster, String connectCluster) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/connectors?expand=info&expand=status"))) - .basicAuth(config.getUsername(), config.getPassword()); + HttpRequest request = HttpRequest.GET( + URI.create(StringUtils.prependUri(config.getUrl(), "/connectors?expand=info&expand=status"))) + .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.retrieve(request, Argument.mapOf(String.class, ConnectorStatus.class))); } /** - * Validate a connector configuration - * @param kafkaCluster The Kafka cluster + * Validate a connector configuration. 
+ * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect * @param connectorClass The connector class * @param connectorSpecs The connector config * @return The configuration infos */ - public Mono validate(String kafkaCluster, String connectCluster, String connectorClass, ConnectorSpecs connectorSpecs) { + public Mono validate(String kafkaCluster, String connectCluster, String connectorClass, + ConnectorSpecs connectorSpecs) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), "/connector-plugins/" + connectorClass + "/config/validate")), connectorSpecs) - .basicAuth(config.getUsername(), config.getPassword()); + HttpRequest request = HttpRequest.PUT(URI.create( + StringUtils.prependUri(config.getUrl(), "/connector-plugins/" + + connectorClass + "/config/validate")), + connectorSpecs) + .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.retrieve(request, ConfigInfos.class)); } /** - * Create or update a connector - * @param kafkaCluster The kafka cluster + * Create or update a connector. + * + * @param kafkaCluster The kafka cluster * @param connectCluster The Kafka Connect - * @param connector The connector + * @param connector The connector * @param connectorSpecs The connector config * @return The creation or update response */ - public Mono createOrUpdate(String kafkaCluster, String connectCluster, String connector, ConnectorSpecs connectorSpecs) { + public Mono createOrUpdate(String kafkaCluster, String connectCluster, String connector, + ConnectorSpecs connectorSpecs) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/config")), connectorSpecs) + HttpRequest request = + HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/config")), + connectorSpecs) .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.retrieve(request, ConnectorInfo.class)); } /** - * Delete a connector - * @param kafkaCluster The Kafka cluster + * Delete a connector. + * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect - * @param connector The connector + * @param connector The connector * @return The deletion response */ public Mono> delete(String kafkaCluster, String connectCluster, String connector) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.DELETE(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector))) + HttpRequest request = + HttpRequest.DELETE(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector))) .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.exchange(request, Void.class)); } /** - * List all connector plugins - * @param kafkaCluster The Kafka cluster + * List all connector plugins. 
+ * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect * @return The list of connector plugins */ public Mono> connectPlugins(String kafkaCluster, String connectCluster) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/connector-plugins"))) + HttpRequest request = + HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/connector-plugins"))) .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.retrieve(request, Argument.listOf(ConnectorPluginInfo.class))); } /** - * Get the status of a connector - * @param kafkaCluster The Kafka cluster + * Get the status of a connector. + * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect - * @param connector The connector + * @param connector The connector * @return The status */ public Mono status(String kafkaCluster, String connectCluster, String connector) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/status"))) + HttpRequest request = + HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/status"))) .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.retrieve(request, ConnectorStateInfo.class)); } /** - * Restart a connector - * @param kafkaCluster The Kafka cluster + * Restart a connector. + * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect - * @param connector The connector - * @param taskId The task ID + * @param connector The connector + * @param taskId The task ID * @return The restart response */ public Mono> restart(String kafkaCluster, String connectCluster, String connector, int taskId) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.POST(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/tasks/" + taskId + "/restart")), null) - .basicAuth(config.getUsername(), config.getPassword()); + HttpRequest request = HttpRequest.POST(URI.create( + StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/tasks/" + + taskId + "/restart")), null) + .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.exchange(request, Void.class)); } /** - * Pause a connector - * @param kafkaCluster The Kafka cluster + * Pause a connector. + * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect - * @param connector The connector + * @param connector The connector * @return The pause response */ public Mono> pause(String kafkaCluster, String connectCluster, String connector) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/pause")), null) + HttpRequest request = + HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/pause")), + null) .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.exchange(request, Void.class)); } /** - * Resume a connector - * @param kafkaCluster The Kafka cluster + * Resume a connector. 
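// Editorial sketch of the request pattern repeated across KafkaConnectClient
// above: build a Micronaut HttpRequest against the Connect REST API, attach
// basic auth, and adapt the reactive response with Mono.from. The base URL and
// credentials are placeholders.
import io.micronaut.core.util.StringUtils;
import io.micronaut.http.HttpRequest;
import io.micronaut.http.client.HttpClient;
import java.net.URI;
import reactor.core.publisher.Mono;

class ConnectRestCallSketch {
    static Mono<String> connectorStatus(HttpClient httpClient, String baseUrl, String connector) {
        var request = HttpRequest
            .GET(URI.create(StringUtils.prependUri(baseUrl, "/connectors/" + connector + "/status")))
            .basicAuth("admin", "changeit");
        // retrieve() returns a Publisher; Mono.from keeps exactly one response.
        return Mono.from(httpClient.retrieve(request, String.class));
    }
}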
+ * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect - * @param connector The connector + * @param connector The connector * @return The resume response */ public Mono> resume(String kafkaCluster, String connectCluster, String connector) { KafkaConnectHttpConfig config = getKafkaConnectConfig(kafkaCluster, connectCluster); - HttpRequest request = HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/resume")), null) + HttpRequest request = + HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), CONNECTORS + connector + "/resume")), + null) .basicAuth(config.getUsername(), config.getPassword()); return Mono.from(httpClient.exchange(request, Void.class)); } /** - * Get the Kafka Connect configuration - * @param kafkaCluster The Kafka cluster + * Get the Kafka Connect configuration. + * + * @param kafkaCluster The Kafka cluster * @param connectCluster The Kafka Connect * @return The Kafka Connect configuration */ public KafkaConnectClient.KafkaConnectHttpConfig getKafkaConnectConfig(String kafkaCluster, String connectCluster) { - Optional config = kafkaAsyncExecutorConfigs.stream() - .filter(kafkaAsyncExecutorConfig -> kafkaAsyncExecutorConfig.getName().equals(kafkaCluster)) - .findFirst(); + Optional config = managedClusterProperties.stream() + .filter(kafkaAsyncExecutorConfig -> kafkaAsyncExecutorConfig.getName().equals(kafkaCluster)) + .findFirst(); if (config.isEmpty()) { - throw new ResourceValidationException(List.of("Kafka cluster \"" + kafkaCluster + "\" not found"), null, null); + throw new ResourceValidationException(List.of("Kafka cluster \"" + kafkaCluster + "\" not found"), null, + null); } Optional connectClusterOptional = connectClusterRepository.findAll() - .stream() - .filter(researchConnectCluster -> researchConnectCluster.getMetadata().getName().equals(connectCluster)) - .findFirst(); + .stream() + .filter(researchConnectCluster -> researchConnectCluster.getMetadata().getName().equals(connectCluster)) + .findFirst(); if (connectClusterOptional.isPresent()) { return KafkaConnectClient.KafkaConnectHttpConfig.builder() - .url(connectClusterOptional.get().getSpec().getUrl()) - .username(connectClusterOptional.get().getSpec().getUsername()) - .password(EncryptionUtils.decryptAES256GCM(connectClusterOptional.get().getSpec().getPassword(), securityConfig.getAes256EncryptionKey())) - .build(); + .url(connectClusterOptional.get().getSpec().getUrl()) + .username(connectClusterOptional.get().getSpec().getUsername()) + .password(EncryptionUtils.decryptAes256Gcm(connectClusterOptional.get().getSpec().getPassword(), + securityProperties.getAes256EncryptionKey())) + .build(); } - KafkaAsyncExecutorConfig.ConnectConfig connectConfig = config.get().getConnects().get(connectCluster); + ManagedClusterProperties.ConnectProperties connectConfig = config.get().getConnects().get(connectCluster); if (connectConfig == null) { - throw new ResourceValidationException(List.of("Connect cluster \"" + connectCluster + "\" not found"), null, null); + throw new ResourceValidationException(List.of("Connect cluster \"" + connectCluster + "\" not found"), null, + null); } return KafkaConnectClient.KafkaConnectHttpConfig.builder() - .url(connectConfig.getUrl()) - .username(connectConfig.getBasicAuthUsername()) - .password(connectConfig.getBasicAuthPassword()) - .build(); + .url(connectConfig.getUrl()) + .username(connectConfig.getBasicAuthUsername()) + .password(connectConfig.getBasicAuthPassword()) + .build(); } - @Builder 
+ /** + * Kafka Connect HTTP configuration. + */ @Getter + @Builder public static class KafkaConnectHttpConfig { private String url; private String username; diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigInfo.java index 0444aca4..d3309afa 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigInfo.java @@ -14,9 +14,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonProperty; -public record ConfigInfo(@JsonProperty("definition") ConfigKeyInfo configKey, @JsonProperty("value") ConfigValueInfo configValue) { +/** + * Connector configuration information. + * + * @param configKey Config key + * @param configValue Config value + */ +public record ConfigInfo(@JsonProperty("definition") ConfigKeyInfo configKey, + @JsonProperty("value") ConfigValueInfo configValue) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigInfos.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigInfos.java index 62a1e41d..d03ef2fb 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigInfos.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigInfos.java @@ -14,11 +14,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonProperty; - import java.util.List; -public record ConfigInfos(String name, @JsonProperty("error_count") int errorCount, List groups, List configs) { +/** + * Connector configuration information. + * + * @param name Name + * @param errorCount Number of errors + * @param groups Groups + * @param configs Configurations + */ +public record ConfigInfos(String name, @JsonProperty("error_count") int errorCount, List groups, + List configs) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigKeyInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigKeyInfo.java index cd1ac20c..573f1960 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigKeyInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigKeyInfo.java @@ -14,12 +14,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonProperty; - import java.util.List; +/** + * Connector configuration information. 
+ * + * @param name Name + * @param type Type + * @param required Required + * @param defaultValue Default value + * @param importance Importance + * @param documentation Documentation + * @param group Group + * @param orderInGroup Order in group + * @param width Width + * @param displayName Display name + * @param dependents Dependents + */ public record ConfigKeyInfo(String name, String type, @JsonProperty("required") boolean required, @JsonProperty("default_value") String defaultValue, String importance, String documentation, String group, @JsonProperty("order_in_group") int orderInGroup, diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigValueInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigValueInfo.java index e1ec72bf..fdfab455 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigValueInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConfigValueInfo.java @@ -14,12 +14,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonProperty; - import java.util.List; -public record ConfigValueInfo(String name, String value, @JsonProperty("recommended_values") List recommendedValues, +/** + * Connector configuration information. + * + * @param name Name + * @param value Value + * @param recommendedValues Recommended values + * @param errors Errors + * @param visible Visible + */ +public record ConfigValueInfo(String name, String value, + @JsonProperty("recommended_values") List recommendedValues, List errors, @JsonProperty("visible") boolean visible) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorInfo.java index af3ff430..4d0edafe 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorInfo.java @@ -14,10 +14,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import java.util.List; import java.util.Map; +/** + * Connector info. + * + * @param name Name + * @param config Config + * @param tasks Tasks + * @param type Type + */ public record ConnectorInfo(String name, Map config, List tasks, ConnectorType type) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorPluginInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorPluginInfo.java index 6178b25c..a519fc68 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorPluginInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorPluginInfo.java @@ -14,9 +14,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonProperty; +/** + * Connector plugin info. 
+ * + * @param className Class name + * @param type Type + * @param version Version + */ public record ConnectorPluginInfo(@JsonProperty("class") String className, ConnectorType type, String version) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorSpecs.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorSpecs.java index 4a1c8b79..e6c2291a 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorSpecs.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorSpecs.java @@ -2,10 +2,15 @@ import com.fasterxml.jackson.annotation.JsonAnyGetter; import com.fasterxml.jackson.annotation.JsonInclude; -import lombok.Builder; - import java.util.Map; +import lombok.Builder; +/** + * Connector specs. + * + * @param config Config + */ @Builder -public record ConnectorSpecs(@JsonAnyGetter @JsonInclude(value = JsonInclude.Include.NON_ABSENT) Map config) { +public record ConnectorSpecs( + @JsonAnyGetter @JsonInclude(value = JsonInclude.Include.NON_ABSENT) Map config) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorStateInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorStateInfo.java index cd49f6c4..b73ebf11 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorStateInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorStateInfo.java @@ -14,17 +14,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; -import lombok.Getter; - import java.util.List; import java.util.Objects; +import lombok.Getter; +/** + * Connector state info. + * + * @param name Name + * @param connector Connector + * @param tasks Tasks + * @param type Type + */ public record ConnectorStateInfo(String name, ConnectorState connector, List tasks, ConnectorType type) { - + /** + * Abstract state. + */ @Getter public abstract static class AbstractState { private final String state; @@ -40,6 +50,9 @@ public abstract static class AbstractState { } } + /** + * Connector state. + */ public static class ConnectorState extends AbstractState { public ConnectorState(@JsonProperty("state") String state, @JsonProperty("worker_id") String worker, @JsonProperty("msg") String msg) { @@ -47,11 +60,15 @@ public ConnectorState(@JsonProperty("state") String state, @JsonProperty("worker } } + /** + * Task state. 
+ */ @Getter public static class TaskState extends AbstractState implements Comparable { private final int id; - public TaskState(@JsonProperty("id") int id, @JsonProperty("state") String state, @JsonProperty("worker_id") String worker, + public TaskState(@JsonProperty("id") int id, @JsonProperty("state") String state, + @JsonProperty("worker_id") String worker, @JsonProperty("msg") String msg) { super(state, worker, msg); this.id = id; @@ -64,10 +81,12 @@ public int compareTo(TaskState that) { @Override public boolean equals(Object o) { - if (o == this) + if (o == this) { return true; - if (!(o instanceof TaskState other)) + } + if (!(o instanceof TaskState other)) { return false; + } return compareTo(other) == 0; } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorStatus.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorStatus.java index d3332213..bf31a434 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorStatus.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorStatus.java @@ -1,4 +1,10 @@ package com.michelin.ns4kafka.services.clients.connect.entities; +/** + * Connector status. + * + * @param info Connector info + * @param status Connector status info + */ public record ConnectorStatus(ConnectorInfo info, ConnectorStateInfo status) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorTaskId.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorTaskId.java index 8b2ba95e..f684d211 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorTaskId.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorTaskId.java @@ -14,22 +14,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.Serializable; /** * Unique ID for a single task. It includes a unique connector ID and a task ID that is unique within * the connector. */ -public record ConnectorTaskId(@JsonProperty("connector") String connector, @JsonProperty("task") int task) implements Serializable, Comparable { +public record ConnectorTaskId(@JsonProperty("connector") String connector, @JsonProperty("task") int task) + implements Serializable, Comparable { @Override public int compareTo(ConnectorTaskId o) { int connectorCmp = connector.compareTo(o.connector); - if (connectorCmp != 0) + if (connectorCmp != 0) { return connectorCmp; + } return Integer.compare(task, o.task); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorType.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorType.java index 11e95360..77a76c12 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorType.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ConnectorType.java @@ -14,24 +14,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonValue; - import java.util.Locale; +/** + * Connector type. 
+ */ public enum ConnectorType { SOURCE, SINK, UNKNOWN; + @JsonCreator + public static ConnectorType forValue(String value) { + return ConnectorType.valueOf(value.toUpperCase(Locale.ROOT)); + } + @Override @JsonValue public String toString() { return super.toString().toLowerCase(Locale.ROOT); } - - @JsonCreator - public static ConnectorType forValue(String value) { - return ConnectorType.valueOf(value.toUpperCase(Locale.ROOT)); - } } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ServerInfo.java b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ServerInfo.java index 799ec15d..65e5aa17 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ServerInfo.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/connect/entities/ServerInfo.java @@ -14,11 +14,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package com.michelin.ns4kafka.services.clients.connect.entities; import com.fasterxml.jackson.annotation.JsonProperty; import lombok.Builder; +/** + * Kafka cluster info. + * + * @param version The version + * @param commit The commit + * @param kafkaClusterId The Kafka cluster id + */ @Builder -public record ServerInfo(@JsonProperty("version") String version, @JsonProperty("commit") String commit, @JsonProperty("kafka_cluster_id") String kafkaClusterId) { +public record ServerInfo(@JsonProperty("version") String version, @JsonProperty("commit") String commit, + @JsonProperty("kafka_cluster_id") String kafkaClusterId) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java index c16a12e5..7b8c88fa 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/SchemaRegistryClient.java @@ -1,7 +1,11 @@ package com.michelin.ns4kafka.services.clients.schema; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.services.clients.schema.entities.*; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityCheckResponse; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityRequest; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityResponse; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaRequest; +import com.michelin.ns4kafka.services.clients.schema.entities.SchemaResponse; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.core.util.StringUtils; import io.micronaut.http.HttpRequest; @@ -12,14 +16,16 @@ import io.micronaut.http.client.exceptions.HttpClientResponseException; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - import java.net.URI; import java.util.List; import java.util.Optional; +import lombok.extern.slf4j.Slf4j; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +/** + * Schema registry client. 
+ */
 @Slf4j
 @Singleton
 public class SchemaRegistryClient {
@@ -31,137 +37,159 @@ public class SchemaRegistryClient {
     private HttpClient httpClient;

     @Inject
-    private List<KafkaAsyncExecutorConfig> kafkaAsyncExecutorConfigs;
+    private List<ManagedClusterProperties> managedClusterProperties;

     /**
-     * List subjects
+     * List subjects.
+     *
      * @param kafkaCluster The Kafka cluster
      * @return A list of subjects
      */
     public Flux<String> getSubjects(String kafkaCluster) {
-        KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster);
+        ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster);
         HttpRequest<?> request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), "/subjects")))
-                .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
+            .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
         return Flux.from(httpClient.retrieve(request, String[].class)).flatMap(Flux::fromArray);
     }

     /**
-     * Get a latest version of a subject
+     * Get the latest version of a subject.
+     *
      * @param kafkaCluster The Kafka cluster
-     * @param subject The subject
+     * @param subject      The subject
      * @return A version of a subject
      */
     public Mono<SchemaResponse> getLatestSubject(String kafkaCluster, String subject) {
-        KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster);
-        HttpRequest<?> request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), SUBJECTS + subject + "/versions/latest")))
-                .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
+        ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster);
+        HttpRequest<?> request = HttpRequest.GET(
+                URI.create(StringUtils.prependUri(config.getUrl(), SUBJECTS + subject + "/versions/latest")))
+            .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
         return Mono.from(httpClient.retrieve(request, SchemaResponse.class))
-                .onErrorResume(HttpClientResponseException.class,
-                        ex -> ex.getStatus().equals(HttpStatus.NOT_FOUND) ? Mono.empty() : Mono.error(ex));
+            .onErrorResume(HttpClientResponseException.class,
+                ex -> ex.getStatus().equals(HttpStatus.NOT_FOUND) ? Mono.empty() : Mono.error(ex));
    }

     /**
-     * Register a subject and a schema
+     * Register a subject and a schema.
+     *
      * @param kafkaCluster The Kafka cluster
-     * @param subject The subject
-     * @param body The schema
+     * @param subject      The subject
+     * @param body         The schema
      * @return The response of the registration
      */
     public Mono<SchemaResponse> register(String kafkaCluster, String subject, SchemaRequest body) {
-        KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster);
-        HttpRequest<?> request = HttpRequest.POST(URI.create(StringUtils.prependUri(config.getUrl(), SUBJECTS + subject + "/versions")), body)
+        ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster);
+        HttpRequest<?> request =
+            HttpRequest.POST(URI.create(StringUtils.prependUri(config.getUrl(), SUBJECTS + subject + "/versions")),
+                body)
                 .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
         return Mono.from(httpClient.retrieve(request, SchemaResponse.class));
     }
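Because getLatestSubject maps a 404 from the registry to an empty Mono rather than an error, callers can branch on subject existence reactively. A minimal usage sketch; the injected client, cluster, and subject names are placeholders:

    // Hypothetical caller; "local" and "my-subject-value" are placeholders, log comes from @Slf4j.
    schemaRegistryClient.getLatestSubject("local", "my-subject-value")
        .map(latest -> "Latest schema version: " + latest.version())
        .switchIfEmpty(Mono.just("Subject not registered yet"))
        .subscribe(log::info);
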
     /**
-     * Delete a subject
+     * Delete a subject.
+     *
      * @param kafkaCluster The Kafka cluster
-     * @param subject The subject
-     * @param hardDelete Should the subject be hard deleted or not
+     * @param subject      The subject
+     * @param hardDelete   Whether the subject should be hard deleted or not
      * @return The versions of the deleted subject
      */
     public Mono<Integer[]> deleteSubject(String kafkaCluster, String subject, boolean hardDelete) {
-        KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster);
-        MutableHttpRequest<?> request = HttpRequest.DELETE(URI.create(StringUtils.prependUri(config.getUrl(), SUBJECTS + subject + "?permanent=" + hardDelete)))
-                .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
+        ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster);
+        MutableHttpRequest<?> request = HttpRequest.DELETE(
+                URI.create(StringUtils.prependUri(config.getUrl(), SUBJECTS + subject + "?permanent=" + hardDelete)))
+            .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
         return Mono.from(httpClient.retrieve(request, Integer[].class));
     }

     /**
-     * Validate the schema compatibility
+     * Validate the schema compatibility.
+     *
      * @param kafkaCluster The Kafka cluster
-     * @param subject The subject
-     * @param body The request
+     * @param subject      The subject
+     * @param body         The request
      * @return The schema compatibility validation
      */
-    public Mono<SchemaCompatibilityCheckResponse> validateSchemaCompatibility(String kafkaCluster, String subject, SchemaRequest body) {
-        KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster);
-        HttpRequest<?> request = HttpRequest.POST(URI.create(StringUtils.prependUri(config.getUrl(), "/compatibility/subjects/" + subject + "/versions?verbose=true")), body)
-                .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
+    public Mono<SchemaCompatibilityCheckResponse> validateSchemaCompatibility(String kafkaCluster, String subject,
+                                                                              SchemaRequest body) {
+        ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster);
+        HttpRequest<?> request = HttpRequest.POST(URI.create(
+                StringUtils.prependUri(config.getUrl(), "/compatibility/subjects/" + subject
+                    + "/versions?verbose=true")),
+            body)
+            .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
         return Mono.from(httpClient.retrieve(request, SchemaCompatibilityCheckResponse.class))
-                .onErrorResume(HttpClientResponseException.class,
-                        ex -> ex.getStatus().equals(HttpStatus.NOT_FOUND) ? Mono.empty() : Mono.error(ex));
+            .onErrorResume(HttpClientResponseException.class,
+                ex -> ex.getStatus().equals(HttpStatus.NOT_FOUND) ? Mono.empty() : Mono.error(ex));
     }
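A sketch of how a service might use the compatibility check above, treating an unknown subject (the empty Mono case) as compatible; the defaulting choice, the schema string, and all names are illustrative assumptions:

    // Hypothetical compatibility pre-check; names and schema are placeholders.
    String avroSchemaString = "{ \"type\": \"string\" }"; // trivial placeholder schema
    SchemaRequest request = SchemaRequest.builder()
        .schemaType("AVRO")
        .schema(avroSchemaString)
        .build();

    Mono<Boolean> isCompatible = schemaRegistryClient
        .validateSchemaCompatibility("local", "my-subject-value", request)
        .map(SchemaCompatibilityCheckResponse::isCompatible)
        .defaultIfEmpty(true); // no existing subject: nothing to be incompatible with
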
     /**
-     * Update the subject compatibility
+     * Update the subject compatibility.
+     *
      * @param kafkaCluster The Kafka cluster
-     * @param subject The subject
-     * @param body The schema compatibility request
+     * @param subject      The subject
+     * @param body         The schema compatibility request
      * @return The schema compatibility update
      */
-    public Mono<SchemaCompatibilityResponse> updateSubjectCompatibility(String kafkaCluster, String subject, SchemaCompatibilityRequest body) {
-        KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster);
-        HttpRequest<?> request = HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), CONFIG + subject)), body)
+    public Mono<SchemaCompatibilityResponse> updateSubjectCompatibility(String kafkaCluster, String subject,
+                                                                        SchemaCompatibilityRequest body) {
+        ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster);
+        HttpRequest<?> request =
+            HttpRequest.PUT(URI.create(StringUtils.prependUri(config.getUrl(), CONFIG + subject)), body)
                 .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
         return Mono.from(httpClient.retrieve(request, SchemaCompatibilityResponse.class));
     }

     /**
-     * Get the current compatibility by subject
+     * Get the current compatibility by subject.
+     *
      * @param kafkaCluster The Kafka cluster
-     * @param subject The subject
+     * @param subject      The subject
      * @return The current schema compatibility
      */
     public Mono<SchemaCompatibilityResponse> getCurrentCompatibilityBySubject(String kafkaCluster, String subject) {
-        KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster);
+        ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster);
         HttpRequest<?> request = HttpRequest.GET(URI.create(StringUtils.prependUri(config.getUrl(), CONFIG + subject)))
-                .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
+            .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
         return Mono.from(httpClient.retrieve(request, SchemaCompatibilityResponse.class))
-                .onErrorResume(HttpClientResponseException.class,
-                        ex -> ex.getStatus().equals(HttpStatus.NOT_FOUND) ? Mono.empty() : Mono.error(ex));
+            .onErrorResume(HttpClientResponseException.class,
+                ex -> ex.getStatus().equals(HttpStatus.NOT_FOUND) ? Mono.empty() : Mono.error(ex));
     }

     /**
-     * Delete current compatibility by subject
+     * Delete current compatibility by subject.
+     *
      * @param kafkaCluster The Kafka cluster
-     * @param subject The subject
+     * @param subject      The subject
      * @return The deleted schema compatibility
      */
     public Mono<SchemaCompatibilityResponse> deleteCurrentCompatibilityBySubject(String kafkaCluster, String subject) {
-        KafkaAsyncExecutorConfig.RegistryConfig config = getSchemaRegistry(kafkaCluster);
-        MutableHttpRequest<?> request = HttpRequest.DELETE(URI.create(StringUtils.prependUri(config.getUrl(), CONFIG + subject)))
+        ManagedClusterProperties.SchemaRegistryProperties config = getSchemaRegistry(kafkaCluster);
+        MutableHttpRequest<?> request =
+            HttpRequest.DELETE(URI.create(StringUtils.prependUri(config.getUrl(), CONFIG + subject)))
                 .basicAuth(config.getBasicAuthUsername(), config.getBasicAuthPassword());
         return Mono.from(httpClient.retrieve(request, SchemaCompatibilityResponse.class));
     }

     /**
-     * Get the schema registry of the given Kafka cluster
+     * Get the schema registry of the given Kafka cluster.
+ * * @param kafkaCluster The Kafka cluster * @return The schema registry configuration */ - private KafkaAsyncExecutorConfig.RegistryConfig getSchemaRegistry(String kafkaCluster) { - Optional config = kafkaAsyncExecutorConfigs.stream() - .filter(kafkaAsyncExecutorConfig -> kafkaAsyncExecutorConfig.getName().equals(kafkaCluster)) - .findFirst(); + private ManagedClusterProperties.SchemaRegistryProperties getSchemaRegistry(String kafkaCluster) { + Optional config = managedClusterProperties.stream() + .filter(kafkaAsyncExecutorConfig -> kafkaAsyncExecutorConfig.getName().equals(kafkaCluster)) + .findFirst(); if (config.isEmpty()) { - throw new ResourceValidationException(List.of("Kafka Cluster [" + kafkaCluster + "] not found"), null, null); + throw new ResourceValidationException(List.of("Kafka Cluster [" + kafkaCluster + "] not found"), null, + null); } if (config.get().getSchemaRegistry() == null) { - throw new ResourceValidationException(List.of("Kafka Cluster [" + kafkaCluster + "] has no schema registry"), null, null); + throw new ResourceValidationException( + List.of("Kafka Cluster [" + kafkaCluster + "] has no schema registry"), null, null); } return config.get().getSchemaRegistry(); diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityCheckResponse.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityCheckResponse.java index 3cc089c0..7581adbc 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityCheckResponse.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityCheckResponse.java @@ -1,10 +1,16 @@ package com.michelin.ns4kafka.services.clients.schema.entities; import com.fasterxml.jackson.annotation.JsonProperty; -import lombok.Builder; - import java.util.List; +import lombok.Builder; +/** + * Schema compatibility check response. + * + * @param isCompatible Whether the schema is compatible or not + * @param messages The list of messages + */ @Builder -public record SchemaCompatibilityCheckResponse(@JsonProperty("is_compatible") boolean isCompatible, List messages) { +public record SchemaCompatibilityCheckResponse(@JsonProperty("is_compatible") boolean isCompatible, + List messages) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityRequest.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityRequest.java index 4b58667f..d59f8f4e 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityRequest.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityRequest.java @@ -2,6 +2,11 @@ import lombok.Builder; +/** + * Schema compatibility request. 
+ * + * @param compatibility The compatibility + */ @Builder public record SchemaCompatibilityRequest(String compatibility) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityResponse.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityResponse.java index 63611e47..824a8560 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityResponse.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaCompatibilityResponse.java @@ -3,6 +3,11 @@ import com.michelin.ns4kafka.models.schema.Schema; import lombok.Builder; +/** + * Schema compatibility response. + * + * @param compatibilityLevel The compatibility level + */ @Builder public record SchemaCompatibilityResponse(Schema.Compatibility compatibilityLevel) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaRequest.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaRequest.java index 6a59c291..30ca7208 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaRequest.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaRequest.java @@ -1,10 +1,12 @@ package com.michelin.ns4kafka.services.clients.schema.entities; import com.michelin.ns4kafka.models.schema.Schema; -import lombok.Builder; - import java.util.List; +import lombok.Builder; +/** + * Schema request. + */ @Builder public record SchemaRequest(String schemaType, String schema, List references) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaResponse.java b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaResponse.java index 7107b1a8..5d9c4388 100644 --- a/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaResponse.java +++ b/src/main/java/com/michelin/ns4kafka/services/clients/schema/entities/SchemaResponse.java @@ -2,6 +2,15 @@ import lombok.Builder; +/** + * Schema response. 
+ * + * @param id The id + * @param version The version + * @param subject The subject + * @param schema The schema + * @param schemaType The schema type + */ @Builder public record SchemaResponse(Integer id, Integer version, String subject, String schema, String schemaType) { } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java index d8cc82ef..d22ef3ca 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/AccessControlEntryAsyncExecutor.java @@ -1,9 +1,14 @@ package com.michelin.ns4kafka.services.executors; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import static com.michelin.ns4kafka.models.AccessControlEntry.ResourceType.GROUP; +import static com.michelin.ns4kafka.models.AccessControlEntry.ResourceType.TOPIC; +import static com.michelin.ns4kafka.models.AccessControlEntry.ResourceType.TRANSACTIONAL_ID; +import static com.michelin.ns4kafka.services.AccessControlEntryService.PUBLIC_GRANTED_TO; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.KafkaStream; import com.michelin.ns4kafka.models.Namespace; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.repositories.NamespaceRepository; import com.michelin.ns4kafka.repositories.kafka.KafkaStoreException; import com.michelin.ns4kafka.services.AccessControlEntryService; @@ -12,6 +17,13 @@ import io.micronaut.context.annotation.EachBean; import jakarta.inject.Inject; import jakarta.inject.Singleton; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Function; +import java.util.stream.Stream; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.common.acl.AclBinding; @@ -22,24 +34,16 @@ import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourceType; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.function.Function; -import java.util.stream.Stream; - -import static com.michelin.ns4kafka.models.AccessControlEntry.ResourceType.*; -import static com.michelin.ns4kafka.services.AccessControlEntryService.PUBLIC_GRANTED_TO; - +/** + * Access control entry executor. 
+ */ @Slf4j -@EachBean(KafkaAsyncExecutorConfig.class) +@EachBean(ManagedClusterProperties.class) @Singleton public class AccessControlEntryAsyncExecutor { private static final String USER_PRINCIPAL = "User:"; - private final KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; + private final ManagedClusterProperties managedClusterProperties; @Inject AccessControlEntryService accessControlEntryService; @@ -53,57 +57,59 @@ public class AccessControlEntryAsyncExecutor { @Inject NamespaceRepository namespaceRepository; - public AccessControlEntryAsyncExecutor(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { - this.kafkaAsyncExecutorConfig = kafkaAsyncExecutorConfig; + public AccessControlEntryAsyncExecutor(ManagedClusterProperties managedClusterProperties) { + this.managedClusterProperties = managedClusterProperties; } /** - * Run the ACL executor + * Run the ACLs synchronization. */ public void run() { - if (this.kafkaAsyncExecutorConfig.isManageAcls()) { - synchronizeACLs(); + if (this.managedClusterProperties.isManageAcls()) { + synchronizeAcls(); } } /** - * Start the ACLs synchronization + * Start the ACLs synchronization. */ - private void synchronizeACLs() { - log.debug("Starting ACL collection for cluster {}", kafkaAsyncExecutorConfig.getName()); + private void synchronizeAcls() { + log.debug("Starting ACL collection for cluster {}", managedClusterProperties.getName()); try { // List ACLs from broker - List brokerACLs = collectBrokerACLs(true); + List brokerAcls = collectBrokerAcls(true); // List ACLs from NS4Kafka - List ns4kafkaACLs = collectNs4KafkaACLs(); + List ns4kafkaAcls = collectNs4KafkaAcls(); - List toCreate = ns4kafkaACLs.stream() - .filter(aclBinding -> !brokerACLs.contains(aclBinding)) - .toList(); + List toCreate = ns4kafkaAcls.stream() + .filter(aclBinding -> !brokerAcls.contains(aclBinding)) + .toList(); - List toDelete = brokerACLs.stream() - .filter(aclBinding -> !ns4kafkaACLs.contains(aclBinding)) - .toList(); + List toDelete = brokerAcls.stream() + .filter(aclBinding -> !ns4kafkaAcls.contains(aclBinding)) + .toList(); if (!toCreate.isEmpty()) { - log.debug("ACL(s) to create: " + String.join("," , toCreate.stream().map(AclBinding::toString).toList())); + log.debug( + "ACL(s) to create: " + String.join(",", toCreate.stream().map(AclBinding::toString).toList())); } if (!toDelete.isEmpty()) { - if (!kafkaAsyncExecutorConfig.isDropUnsyncAcls()) { + if (!managedClusterProperties.isDropUnsyncAcls()) { log.debug("The ACL drop is disabled. The following ACLs won't be deleted."); } - log.debug("ACL(s) to delete: " + String.join("," , toDelete.stream().map(AclBinding::toString).toList())); + log.debug( + "ACL(s) to delete: " + String.join(",", toDelete.stream().map(AclBinding::toString).toList())); } // Execute toAdd list BEFORE toDelete list to avoid breaking ACL on connected user // such as deleting only to add one second later - createACLs(toCreate); + createAcls(toCreate); - if (kafkaAsyncExecutorConfig.isDropUnsyncAcls()) { - deleteACLs(toDelete); + if (managedClusterProperties.isDropUnsyncAcls()) { + deleteAcls(toDelete); } } catch (KafkaStoreException | ExecutionException | TimeoutException e) { log.error("An error occurred collecting ACLs from broker during ACLs synchronization", e); @@ -116,52 +122,58 @@ private void synchronizeACLs() { /** * Collect the ACLs from Ns4Kafka. 
      * Whenever the permission is OWNER, create 2 entries (one READ and one WRITE)
-     * This is necessary to translate ns4kafka grouped AccessControlEntry (OWNER, WRITE, READ)
+     * This is necessary to translate Ns4Kafka grouped AccessControlEntry (OWNER, WRITE, READ)
      * into Kafka Atomic ACLs (READ and WRITE)
+     *
      * @return A list of ACLs
      */
-    private List<AclBinding> collectNs4KafkaACLs() {
-        List<Namespace> namespaces = namespaceRepository.findAllForCluster(kafkaAsyncExecutorConfig.getName());
+    private List<AclBinding> collectNs4KafkaAcls() {
+        List<Namespace> namespaces = namespaceRepository.findAllForCluster(managedClusterProperties.getName());

-        // Converts topic, group and transaction Ns4kafka ACLs to topic and group Kafka AclBindings
-        Stream<AclBinding> aclBindingFromACLs = namespaces
-                .stream()
-                .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace)
-                        .stream()
-                        .filter(accessControlEntry -> (List.of(TOPIC, GROUP, TRANSACTIONAL_ID).contains(accessControlEntry.getSpec().getResourceType())))
-                        .flatMap(accessControlEntry -> buildAclBindingsFromAccessControlEntry(accessControlEntry, namespace.getSpec().getKafkaUser())
-                                .stream())
-                        .distinct());
+        // Converts topic, group and transaction Ns4Kafka ACLs to topic and group Kafka AclBindings
+        Stream<AclBinding> aclBindingsFromAcls = namespaces
+            .stream()
+            .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace)
+                .stream()
+                .filter(accessControlEntry -> (List.of(TOPIC, GROUP, TRANSACTIONAL_ID)
+                    .contains(accessControlEntry.getSpec().getResourceType())))
+                .flatMap(accessControlEntry -> buildAclBindingsFromAccessControlEntry(accessControlEntry,
+                    namespace.getSpec().getKafkaUser())
+                    .stream())
+                .distinct());

         // Converts KafkaStream resources to topic (CREATE/DELETE) AclBindings
-        Stream<AclBinding> aclBindingFromKStream = namespaces.stream()
-                .flatMap(namespace -> streamService.findAllForNamespace(namespace)
-                        .stream()
-                        .flatMap(kafkaStream ->
-                                buildAclBindingsFromKafkaStream(kafkaStream, namespace.getSpec().getKafkaUser()).stream()));
+        Stream<AclBinding> aclBindingFromKstream = namespaces.stream()
+            .flatMap(namespace -> streamService.findAllForNamespace(namespace)
+                .stream()
+                .flatMap(kafkaStream ->
+                    buildAclBindingsFromKafkaStream(kafkaStream, namespace.getSpec().getKafkaUser()).stream()));

         // Converts connect ACLs to group AclBindings (connect-)
         Stream<AclBinding> aclBindingFromConnect = namespaces.stream()
-                .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace)
-                        .stream()
-                        .filter(accessControlEntry -> accessControlEntry.getSpec().getResourceType() == AccessControlEntry.ResourceType.CONNECT)
-                        .filter(accessControlEntry -> accessControlEntry.getSpec().getPermission() == AccessControlEntry.Permission.OWNER)
-                        .flatMap(accessControlEntry ->
-                                buildAclBindingsFromConnector(accessControlEntry, namespace.getSpec().getKafkaUser()).stream()));
-
-        List<AclBinding> ns4kafkaACLs = Stream.of(aclBindingFromACLs, aclBindingFromKStream, aclBindingFromConnect)
-                .flatMap(Function.identity())
-                .toList();
-
-        if (!ns4kafkaACLs.isEmpty()) {
-            log.trace("ACL(s) found in Ns4Kafka: " + String.join("," , ns4kafkaACLs.stream().map(AclBinding::toString).toList()));
+            .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace)
+                .stream()
+                .filter(accessControlEntry -> accessControlEntry.getSpec().getResourceType()
+                    == AccessControlEntry.ResourceType.CONNECT)
+                .filter(accessControlEntry -> accessControlEntry.getSpec().getPermission()
+                    == AccessControlEntry.Permission.OWNER)
+                .flatMap(accessControlEntry ->
+                    buildAclBindingsFromConnector(accessControlEntry, namespace.getSpec().getKafkaUser()).stream()));
+
+        List<AclBinding> ns4kafkaAcls = Stream.of(aclBindingsFromAcls, aclBindingFromKstream, aclBindingFromConnect)
+            .flatMap(Function.identity())
+            .toList();
+
+        if (!ns4kafkaAcls.isEmpty()) {
+            log.trace("ACL(s) found in Ns4Kafka: "
+                + String.join(",", ns4kafkaAcls.stream().map(AclBinding::toString).toList()));
         }

-        return ns4kafkaACLs;
+        return ns4kafkaAcls;
    }
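To make the OWNER translation described above concrete, a single OWNER grant roughly expands as follows; the principal and the prefixed resource name are made-up examples (and, per computeAclOperationForOwner further down, a topic OWNER also receives DESCRIBE_CONFIGS):

    // Illustrative expansion of one OWNER ACL on topic prefix "abc." for Kafka user "user1".
    ResourcePattern pattern = new ResourcePattern(ResourceType.TOPIC, "abc.", PatternType.PREFIXED);
    AclBinding read = new AclBinding(pattern, new org.apache.kafka.common.acl.AccessControlEntry(
        "User:user1", "*", AclOperation.READ, AclPermissionType.ALLOW));
    AclBinding write = new AclBinding(pattern, new org.apache.kafka.common.acl.AccessControlEntry(
        "User:user1", "*", AclOperation.WRITE, AclPermissionType.ALLOW));
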
     /**
-     * Collect the ACLs from broker
+     * Collect the ACLs from the broker.
      *
      * @param managedUsersOnly Only retrieve ACLs from Kafka user managed by Ns4Kafka or not ?
      * @return A list of ACLs
@@ -169,54 +181,62 @@ private List<AclBinding> collectNs4KafkaACLs() {
      * @throws InterruptedException Any interrupted exception during ACLs description
      * @throws TimeoutException Any timeout exception during ACLs description
      */
-    private List<AclBinding> collectBrokerACLs(boolean managedUsersOnly) throws ExecutionException, InterruptedException, TimeoutException {
-        List<ResourceType> validResourceTypes = List.of(ResourceType.TOPIC, ResourceType.GROUP, ResourceType.TRANSACTIONAL_ID);
-
-        List<AclBinding> userACLs = getAdminClient()
-                .describeAcls(AclBindingFilter.ANY)
-                .values().get(10, TimeUnit.SECONDS)
-                .stream()
-                .filter(aclBinding -> validResourceTypes.contains(aclBinding.pattern().resourceType()))
-                .toList();
+    private List<AclBinding> collectBrokerAcls(boolean managedUsersOnly)
+        throws ExecutionException, InterruptedException, TimeoutException {
+        List<ResourceType> validResourceTypes =
+            List.of(ResourceType.TOPIC, ResourceType.GROUP, ResourceType.TRANSACTIONAL_ID);
+
+        List<AclBinding> userAcls = getAdminClient()
+            .describeAcls(AclBindingFilter.ANY)
+            .values().get(10, TimeUnit.SECONDS)
+            .stream()
+            .filter(aclBinding -> validResourceTypes.contains(aclBinding.pattern().resourceType()))
+            .toList();

         if (managedUsersOnly) {
             // Collect the list of users managed in Ns4Kafka
             List<String> managedUsers = new ArrayList<>();
             managedUsers.add(USER_PRINCIPAL + PUBLIC_GRANTED_TO);
-            managedUsers.addAll(namespaceRepository.findAllForCluster(kafkaAsyncExecutorConfig.getName())
-                    .stream()
-                    .flatMap(namespace -> Stream.of(USER_PRINCIPAL + namespace.getSpec().getKafkaUser()))
-                    .toList());
+            managedUsers.addAll(namespaceRepository.findAllForCluster(managedClusterProperties.getName())
+                .stream()
+                .flatMap(namespace -> Stream.of(USER_PRINCIPAL + namespace.getSpec().getKafkaUser()))
+                .toList());

             // Filter out the ACLs to retain only those matching
-            userACLs = userACLs
-                    .stream()
-                    .filter(aclBinding -> managedUsers.contains(aclBinding.entry().principal()))
-                    .toList();
+            userAcls = userAcls
+                .stream()
+                .filter(aclBinding -> managedUsers.contains(aclBinding.entry().principal()))
+                .toList();

-            if (!userACLs.isEmpty()) {
-                log.trace("ACL(s) found in broker (managed scope): " + String.join("," , userACLs.stream().map(AclBinding::toString).toList()));
+            if (!userAcls.isEmpty()) {
+                log.trace("ACL(s) found in broker (managed scope): "
+                    + String.join(",", userAcls.stream().map(AclBinding::toString).toList()));
             }
         }

-        if (!userACLs.isEmpty()) {
-            log.trace("ACL(s) found in broker: " + String.join("," , userACLs.stream().map(AclBinding::toString).toList()));
+        if (!userAcls.isEmpty()) {
+            log.trace(
+                "ACL(s) found in broker: " + String.join(",", userAcls.stream().map(AclBinding::toString).toList()));
        }

-        return userACLs;
+        return userAcls;
    }
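Stepping back, the synchronizeAcls flow earlier in this file reconciles the two collections above with two set differences; a condensed restatement of that logic:

    List<AclBinding> brokerAcls = collectBrokerAcls(true);
    List<AclBinding> ns4kafkaAcls = collectNs4KafkaAcls();

    // Declared in Ns4Kafka but absent from the broker: to create.
    List<AclBinding> toCreate = ns4kafkaAcls.stream()
        .filter(aclBinding -> !brokerAcls.contains(aclBinding))
        .toList();

    // Present on the broker but no longer declared: deleted only when drop-unsync-acls is enabled.
    List<AclBinding> toDelete = brokerAcls.stream()
        .filter(aclBinding -> !ns4kafkaAcls.contains(aclBinding))
        .toList();
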
     /**
-     * Convert Ns4Kafka topic/group ACL into Kafka ACL
+     * Convert Ns4Kafka topic and group ACL into Kafka ACL.
+     *
      * @param accessControlEntry The Ns4Kafka ACL
      * @param kafkaUser The ACL owner
      * @return A list of Kafka ACLs
      */
-    private List<AclBinding> buildAclBindingsFromAccessControlEntry(AccessControlEntry accessControlEntry, String kafkaUser) {
+    private List<AclBinding> buildAclBindingsFromAccessControlEntry(AccessControlEntry accessControlEntry,
+                                                                    String kafkaUser) {
         // Convert pattern, convert resource type from Ns4Kafka to org.apache.kafka.common types
-        PatternType patternType = PatternType.fromString(accessControlEntry.getSpec().getResourcePatternType().toString());
+        PatternType patternType =
+            PatternType.fromString(accessControlEntry.getSpec().getResourcePatternType().toString());
         ResourceType resourceType = ResourceType.fromString(accessControlEntry.getSpec().getResourceType().toString());
-        ResourcePattern resourcePattern = new ResourcePattern(resourceType, accessControlEntry.getSpec().getResource(), patternType);
+        ResourcePattern resourcePattern =
+            new ResourcePattern(resourceType, accessControlEntry.getSpec().getResource(), patternType);

         // Generate the required AclOperation based on ResourceType
         List<AclOperation> targetAclOperations;
@@ -224,20 +244,24 @@ private List<AclBinding> buildAclBindingsFromAccessControlEnt
             targetAclOperations = computeAclOperationForOwner(resourceType);
         } else {
             // Should be READ or WRITE
-            targetAclOperations = List.of(AclOperation.fromString(accessControlEntry.getSpec().getPermission().toString()));
+            targetAclOperations =
+                List.of(AclOperation.fromString(accessControlEntry.getSpec().getPermission().toString()));
         }

-        final String aclUser = accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO) ? PUBLIC_GRANTED_TO : kafkaUser;
+        final String aclUser =
+            accessControlEntry.getSpec().getGrantedTo().equals(PUBLIC_GRANTED_TO) ? PUBLIC_GRANTED_TO : kafkaUser;

         return targetAclOperations
-                .stream()
-                .map(aclOperation ->
-                        new AclBinding(resourcePattern, new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + aclUser,
-                                "*", aclOperation, AclPermissionType.ALLOW)))
-                .toList();
+            .stream()
+            .map(aclOperation ->
+                new AclBinding(resourcePattern,
+                    new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + aclUser,
+                        "*", aclOperation, AclPermissionType.ALLOW)))
+            .toList();
     }

     /**
-     * Convert Kafka Stream resource into Kafka ACL
+     * Convert Kafka Stream resource into Kafka ACL.
+ * * @param stream The Kafka Stream resource * @param kafkaUser The ACL owner * @return A list of Kafka ACLs @@ -245,25 +269,29 @@ private List buildAclBindingsFromAccessControlEntry(AccessControlEnt private List buildAclBindingsFromKafkaStream(KafkaStream stream, String kafkaUser) { // As per https://docs.confluent.io/platform/current/streams/developer-guide/security.html#required-acl-setting-for-secure-ak-clusters return List.of( - // CREATE and DELETE on Stream Topics - new AclBinding( - new ResourcePattern(ResourceType.TOPIC, stream.getMetadata().getName(), PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + kafkaUser, "*", AclOperation.CREATE, AclPermissionType.ALLOW) - ), - new AclBinding( - new ResourcePattern(ResourceType.TOPIC, stream.getMetadata().getName(), PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + kafkaUser, "*", AclOperation.DELETE, AclPermissionType.ALLOW) - ), - // WRITE on TransactionalId - new AclBinding( - new ResourcePattern(ResourceType.TRANSACTIONAL_ID, stream.getMetadata().getName(), PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + kafkaUser, "*", AclOperation.WRITE, AclPermissionType.ALLOW) - ) + // CREATE and DELETE on Stream Topics + new AclBinding( + new ResourcePattern(ResourceType.TOPIC, stream.getMetadata().getName(), PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + kafkaUser, "*", AclOperation.CREATE, + AclPermissionType.ALLOW) + ), + new AclBinding( + new ResourcePattern(ResourceType.TOPIC, stream.getMetadata().getName(), PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + kafkaUser, "*", AclOperation.DELETE, + AclPermissionType.ALLOW) + ), + // WRITE on TransactionalId + new AclBinding( + new ResourcePattern(ResourceType.TRANSACTIONAL_ID, stream.getMetadata().getName(), + PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + kafkaUser, "*", AclOperation.WRITE, + AclPermissionType.ALLOW) + ) ); } /** - * Convert Ns4Kafka connect ACL into Kafka ACL + * Convert Ns4Kafka connect ACL into Kafka ACL. * * @param acl The Ns4Kafka ACL * @param kafkaUser The ACL owner @@ -272,124 +300,124 @@ private List buildAclBindingsFromKafkaStream(KafkaStream stream, Str private List buildAclBindingsFromConnector(AccessControlEntry acl, String kafkaUser) { PatternType patternType = PatternType.fromString(acl.getSpec().getResourcePatternType().toString()); ResourcePattern resourcePattern = new ResourcePattern(ResourceType.GROUP, - "connect-" + acl.getSpec().getResource(), - patternType); + "connect-" + acl.getSpec().getResource(), + patternType); return List.of( - new AclBinding( - resourcePattern, - new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + kafkaUser, "*", AclOperation.READ, AclPermissionType.ALLOW) - ) + new AclBinding( + resourcePattern, + new org.apache.kafka.common.acl.AccessControlEntry(USER_PRINCIPAL + kafkaUser, "*", AclOperation.READ, + AclPermissionType.ALLOW) + ) ); } /** - * Get ACL operations from given resource type + * Get ACL operations from given resource type. 
* * @param resourceType The resource type * @return A list of ACL operations */ private List computeAclOperationForOwner(ResourceType resourceType) { - switch (resourceType) { - case TOPIC: - return List.of(AclOperation.WRITE, AclOperation.READ, AclOperation.DESCRIBE_CONFIGS); - case GROUP: - return List.of(AclOperation.READ); - case TRANSACTIONAL_ID: - return List.of(AclOperation.DESCRIBE, AclOperation.WRITE); - case CLUSTER,DELEGATION_TOKEN: - default: - throw new IllegalArgumentException("Not implemented yet: " + resourceType); - } + return switch (resourceType) { + case TOPIC -> List.of(AclOperation.WRITE, AclOperation.READ, AclOperation.DESCRIBE_CONFIGS); + case GROUP -> List.of(AclOperation.READ); + case TRANSACTIONAL_ID -> List.of(AclOperation.DESCRIBE, AclOperation.WRITE); + default -> throw new IllegalArgumentException("Not implemented yet: " + resourceType); + }; } /** - * Delete a given list of ACLs + * Delete a given list of ACLs. * * @param toDelete The list of ACLs to delete */ - private void deleteACLs(List toDelete) { + private void deleteAcls(List toDelete) { getAdminClient() - .deleteAcls(toDelete.stream() - .map(AclBinding::toFilter) - .toList()) - .values().forEach((key, value) -> { - try { - value.get(10, TimeUnit.SECONDS); - log.info("Success deleting ACL {} on {}", key, this.kafkaAsyncExecutorConfig.getName()); - } catch (InterruptedException e) { - log.error("Error", e); - Thread.currentThread().interrupt(); - } catch (Exception e) { - log.error(String.format("Error while deleting ACL %s on %s", key, this.kafkaAsyncExecutorConfig.getName()), e); - } - }); + .deleteAcls(toDelete.stream() + .map(AclBinding::toFilter) + .toList()) + .values().forEach((key, value) -> { + try { + value.get(10, TimeUnit.SECONDS); + log.info("Success deleting ACL {} on {}", key, managedClusterProperties.getName()); + } catch (InterruptedException e) { + log.error("Error", e); + Thread.currentThread().interrupt(); + } catch (Exception e) { + log.error( + String.format("Error while deleting ACL %s on %s", key, + managedClusterProperties.getName()), e); + } + }); } /** - * Delete a given Ns4Kafka ACL - * Convert Ns4Kafka ACL into Kafka ACLs before deletion + * Delete a given Ns4Kafka ACL. + * Convert Ns4Kafka ACL into Kafka ACLs before deletion. 
* * @param namespace The namespace - * @param ns4kafkaACL The Kafka ACL + * @param ns4kafkaAcl The Kafka ACL */ - public void deleteNs4KafkaACL(Namespace namespace, AccessControlEntry ns4kafkaACL) { - if (kafkaAsyncExecutorConfig.isManageAcls()) { + public void deleteNs4KafkaAcl(Namespace namespace, AccessControlEntry ns4kafkaAcl) { + if (managedClusterProperties.isManageAcls()) { List results = new ArrayList<>(); - if (List.of(TOPIC, GROUP, TRANSACTIONAL_ID).contains(ns4kafkaACL.getSpec().getResourceType())) { - results.addAll(buildAclBindingsFromAccessControlEntry(ns4kafkaACL, namespace.getSpec().getKafkaUser())); + if (List.of(TOPIC, GROUP, TRANSACTIONAL_ID).contains(ns4kafkaAcl.getSpec().getResourceType())) { + results.addAll(buildAclBindingsFromAccessControlEntry(ns4kafkaAcl, namespace.getSpec().getKafkaUser())); } - if (ns4kafkaACL.getSpec().getResourceType() == AccessControlEntry.ResourceType.CONNECT && - ns4kafkaACL.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) { - results.addAll(buildAclBindingsFromConnector(ns4kafkaACL, namespace.getSpec().getKafkaUser())); + if (ns4kafkaAcl.getSpec().getResourceType() == AccessControlEntry.ResourceType.CONNECT + && ns4kafkaAcl.getSpec().getPermission() == AccessControlEntry.Permission.OWNER) { + results.addAll(buildAclBindingsFromConnector(ns4kafkaAcl, namespace.getSpec().getKafkaUser())); } - deleteACLs(results); + deleteAcls(results); } } /** - * Delete a given Kafka Streams + * Delete a given Kafka Streams. * * @param namespace The namespace * @param kafkaStream The Kafka Streams */ public void deleteKafkaStreams(Namespace namespace, KafkaStream kafkaStream) { - if (kafkaAsyncExecutorConfig.isManageAcls()) { - List results = new ArrayList<>(buildAclBindingsFromKafkaStream(kafkaStream, namespace.getSpec().getKafkaUser())); - deleteACLs(results); + if (managedClusterProperties.isManageAcls()) { + List results = + new ArrayList<>(buildAclBindingsFromKafkaStream(kafkaStream, namespace.getSpec().getKafkaUser())); + deleteAcls(results); } } /** - * Create a given list of ACLs + * Create a given list of ACLs. * * @param toCreate The list of ACLs to create */ - private void createACLs(List toCreate) { + private void createAcls(List toCreate) { getAdminClient().createAcls(toCreate) - .values() - .forEach((key, value) -> { - try { - value.get(10, TimeUnit.SECONDS); - log.info("Success creating ACL {} on {}", key, this.kafkaAsyncExecutorConfig.getName()); - } catch (InterruptedException e) { - log.error("Error", e); - Thread.currentThread().interrupt(); - } catch (Exception e) { - log.error(String.format("Error while creating ACL %s on %s", key, this.kafkaAsyncExecutorConfig.getName()), e); - } - }); + .values() + .forEach((key, value) -> { + try { + value.get(10, TimeUnit.SECONDS); + log.info("Success creating ACL {} on {}", key, this.managedClusterProperties.getName()); + } catch (InterruptedException e) { + log.error("Error", e); + Thread.currentThread().interrupt(); + } catch (Exception e) { + log.error(String.format("Error while creating ACL %s on %s", key, + this.managedClusterProperties.getName()), e); + } + }); } /** - * Getter for admin client service + * Getter for admin client service. 
* * @return The admin client */ private Admin getAdminClient() { - return kafkaAsyncExecutorConfig.getAdminClient(); + return managedClusterProperties.getAdminClient(); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java index af93a640..09464a1c 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/ConnectorAsyncExecutor.java @@ -1,9 +1,9 @@ package com.michelin.ns4kafka.services.executors; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.connect.cluster.ConnectCluster; import com.michelin.ns4kafka.models.connector.Connector; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.repositories.ConnectorRepository; import com.michelin.ns4kafka.services.ConnectClusterService; import com.michelin.ns4kafka.services.clients.connect.KafkaConnectClient; @@ -14,53 +14,55 @@ import io.micronaut.http.client.exceptions.HttpClientResponseException; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Stream; +import lombok.extern.slf4j.Slf4j; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +/** + * Connector executor. + */ @Slf4j -@EachBean(KafkaAsyncExecutorConfig.class) +@EachBean(ManagedClusterProperties.class) @Singleton public class ConnectorAsyncExecutor { - private final KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; - + private final ManagedClusterProperties managedClusterProperties; + private final Set healthyConnectClusters = new HashSet<>(); + private final Set idleConnectClusters = new HashSet<>(); @Inject private ConnectorRepository connectorRepository; - @Inject private KafkaConnectClient kafkaConnectClient; - @Inject private ConnectClusterService connectClusterService; - private final Set healthyConnectClusters = new HashSet<>(); - private final Set idleConnectClusters = new HashSet<>(); - - public ConnectorAsyncExecutor(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { - this.kafkaAsyncExecutorConfig = kafkaAsyncExecutorConfig; + public ConnectorAsyncExecutor(ManagedClusterProperties managedClusterProperties) { + this.managedClusterProperties = managedClusterProperties; } /** - * Start connector synchronization + * Run the connector synchronization. + * + * @return A flux of connector info */ public Flux run() { - if (kafkaAsyncExecutorConfig.isManageConnectors()) { + if (managedClusterProperties.isManageConnectors()) { return synchronizeConnectors(); } return Flux.empty(); } /** - * Start connector synchronization + * Run the health check. 
+ * + * @return A flux of Connect clusters */ public Flux runHealthCheck() { - if (kafkaAsyncExecutorConfig.isManageConnectors()) { + if (managedClusterProperties.isManageConnectors()) { return checkConnectClusterHealth(); } return Flux.empty(); @@ -68,69 +70,79 @@ public Flux runHealthCheck() { /** * Get all connect clusters of the current Kafka cluster execution, including - * both self-declared Connect clusters and hard-declared Connect clusters + * both self-declared Connect clusters and hard-declared Connect clusters. + * * @return A list of Connect clusters */ private Flux getConnectClusters() { return connectClusterService.findAll(true) - .filter(connectCluster -> connectCluster.getMetadata().getCluster().equals(kafkaAsyncExecutorConfig.getName())); + .filter( + connectCluster -> connectCluster.getMetadata().getCluster().equals(managedClusterProperties.getName())); } /** - * Check connect cluster health + * Check connect cluster health. + * + * @return The list of healthy Connect clusters */ private Flux checkConnectClusterHealth() { return getConnectClusters() - .doOnNext(connectCluster -> { - if (connectCluster.getSpec().getStatus().equals(ConnectCluster.Status.HEALTHY)) { - log.debug("Kafka Connect \"" + connectCluster.getMetadata().getName() + "\" is healthy."); - healthyConnectClusters.add(connectCluster.getMetadata().getName()); - idleConnectClusters.remove(connectCluster.getMetadata().getName()); - } else if (connectCluster.getSpec().getStatus().equals(ConnectCluster.Status.IDLE)) { - log.debug("Kafka Connect \"" + connectCluster.getMetadata().getName() + "\" is not healthy: " + connectCluster.getSpec().getStatusMessage() + "."); - idleConnectClusters.add(connectCluster.getMetadata().getName()); - healthyConnectClusters.remove(connectCluster.getMetadata().getName()); - } - }); + .doOnNext(connectCluster -> { + if (connectCluster.getSpec().getStatus().equals(ConnectCluster.Status.HEALTHY)) { + log.debug("Kafka Connect \"" + connectCluster.getMetadata().getName() + "\" is healthy."); + healthyConnectClusters.add(connectCluster.getMetadata().getName()); + idleConnectClusters.remove(connectCluster.getMetadata().getName()); + } else if (connectCluster.getSpec().getStatus().equals(ConnectCluster.Status.IDLE)) { + log.debug("Kafka Connect \"" + connectCluster.getMetadata().getName() + "\" is not healthy: " + + connectCluster.getSpec().getStatusMessage() + "."); + idleConnectClusters.add(connectCluster.getMetadata().getName()); + healthyConnectClusters.remove(connectCluster.getMetadata().getName()); + } + }); } /** - * For each connect cluster, start the synchronization of connectors + * For each connect cluster, start the synchronization of connectors. */ private Flux synchronizeConnectors() { - log.debug("Starting connector synchronization for Kafka cluster {}. Healthy Kafka Connects: {}. Idle Kafka Connects: {}", - kafkaAsyncExecutorConfig.getName(), - !healthyConnectClusters.isEmpty() ? String.join(",", healthyConnectClusters) : "N/A", - !idleConnectClusters.isEmpty() ? String.join(",", idleConnectClusters) : "N/A"); + log.debug( + "Starting connector synchronization for Kafka cluster {}. Healthy Kafka Connects: {}." + + " Idle Kafka Connects: {}", managedClusterProperties.getName(), + !healthyConnectClusters.isEmpty() ? String.join(",", healthyConnectClusters) : "N/A", + !idleConnectClusters.isEmpty() ? String.join(",", idleConnectClusters) : "N/A"); if (healthyConnectClusters.isEmpty()) { - log.debug("No healthy Kafka Connect for Kafka cluster {}. 
Skipping synchronization.", kafkaAsyncExecutorConfig.getName()); + log.debug("No healthy Kafka Connect for Kafka cluster {}. Skipping synchronization.", + managedClusterProperties.getName()); return Flux.empty(); } return Flux.fromIterable(healthyConnectClusters) - .flatMap(this::synchronizeConnectCluster); + .flatMap(this::synchronizeConnectCluster); } /** - * Synchronize connectors of given connect cluster + * Synchronize connectors of given connect cluster. + * * @param connectCluster The connect cluster */ private Flux synchronizeConnectCluster(String connectCluster) { log.debug("Starting connector collection for Kafka cluster {} and Kafka Connect {}.", - kafkaAsyncExecutorConfig.getName(), connectCluster); + managedClusterProperties.getName(), connectCluster); return collectBrokerConnectors(connectCluster) .doOnError(error -> { if (error instanceof HttpClientResponseException httpClientResponseException) { - log.error("Invalid HTTP response {} ({}) during connectors synchronization for Kafka cluster {} and Kafka Connect {}.", - httpClientResponseException.getStatus(), httpClientResponseException.getResponse().getStatus(), - kafkaAsyncExecutorConfig.getName(), connectCluster); + log.error( + "Invalid HTTP response {} ({}) during connectors synchronization for Kafka cluster {}" + + " and Kafka Connect {}.", + httpClientResponseException.getStatus(), httpClientResponseException.getResponse().getStatus(), + managedClusterProperties.getName(), connectCluster); } else { - log.error("Exception during connectors synchronization for Kafka cluster {} and Kafka Connect {}: {}.", - kafkaAsyncExecutorConfig.getName(), connectCluster, error.getMessage()); + log.error( + "Exception during connectors synchronization for Kafka cluster {} and Kafka Connect {}: {}.", + managedClusterProperties.getName(), connectCluster, error.getMessage()); } }) .collectList() @@ -138,84 +150,93 @@ private Flux synchronizeConnectCluster(String connectCluster) { List ns4kafkaConnectors = collectNs4KafkaConnectors(connectCluster); List toCreate = ns4kafkaConnectors.stream() - .filter(connector -> brokerConnectors.stream().noneMatch(connector1 -> connector1.getMetadata().getName().equals(connector.getMetadata().getName()))) - .toList(); + .filter(connector -> brokerConnectors.stream().noneMatch( + connector1 -> connector1.getMetadata().getName().equals(connector.getMetadata().getName()))) + .toList(); List toUpdate = ns4kafkaConnectors.stream() - .filter(connector -> brokerConnectors.stream() - .anyMatch(connector1 -> { - if (connector1.getMetadata().getName().equals(connector.getMetadata().getName())) { - return !connectorsAreSame(connector, connector1); - } - return false; - })) - .toList(); + .filter(connector -> brokerConnectors.stream() + .anyMatch(connector1 -> { + if (connector1.getMetadata().getName().equals(connector.getMetadata().getName())) { + return !connectorsAreSame(connector, connector1); + } + return false; + })) + .toList(); if (!toCreate.isEmpty()) { - log.debug("Connector(s) to create: " + String.join(",", toCreate.stream().map(connector -> connector.getMetadata().getName()).toList())); + log.debug("Connector(s) to create: " + String.join(",", + toCreate.stream().map(connector -> connector.getMetadata().getName()).toList())); } if (!toUpdate.isEmpty()) { - log.debug("Connector(s) to update: " + String.join(",", toUpdate.stream().map(connector -> connector.getMetadata().getName()).toList())); + log.debug("Connector(s) to update: " + String.join(",", + toUpdate.stream().map(connector -> 
connector.getMetadata().getName()).toList())); } return Flux.fromStream(Stream.concat(toCreate.stream(), toUpdate.stream())) - .flatMap(this::deployConnector); + .flatMap(this::deployConnector); }); } /** - * Collect the connectors deployed on the given connect cluster + * Collect the connectors deployed on the given connect cluster. + * * @param connectCluster The connect cluster * @return A list of connectors */ public Flux collectBrokerConnectors(String connectCluster) { - return kafkaConnectClient.listAll(kafkaAsyncExecutorConfig.getName(), connectCluster) - .flatMapMany(connectors -> { - log.debug("{} connectors found on Kafka Connect {} of Kafka cluster {}.", connectors.size(), connectCluster, kafkaAsyncExecutorConfig.getName()); + return kafkaConnectClient.listAll(managedClusterProperties.getName(), connectCluster) + .flatMapMany(connectors -> { + log.debug("{} connectors found on Kafka Connect {} of Kafka cluster {}.", connectors.size(), + connectCluster, managedClusterProperties.getName()); - return Flux.fromIterable(connectors.values()) - .map(connectorStatus -> buildConnectorFromConnectorStatus(connectorStatus, connectCluster)); - }); + return Flux.fromIterable(connectors.values()) + .map(connectorStatus -> buildConnectorFromConnectorStatus(connectorStatus, connectCluster)); + }); } /** - * Build a connector from a given connector status + * Build a connector from a given connector status. + * * @param connectorStatus The connector status - * @param connectCluster The connect cluster + * @param connectCluster The connect cluster * @return The built connector */ private Connector buildConnectorFromConnectorStatus(ConnectorStatus connectorStatus, String connectCluster) { return Connector.builder() - .metadata(ObjectMeta.builder() - // Any other metadata is not useful for this process - .name(connectorStatus.info().name()) - .build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster(connectCluster) - .config(connectorStatus.info().config()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + // Any other metadata is not useful for this process + .name(connectorStatus.info().name()) + .build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster(connectCluster) + .config(connectorStatus.info().config()) + .build()) + .build(); } /** - * Collect the connectors from Ns4Kafka deployed on the given connect cluster + * Collect the connectors from Ns4Kafka deployed on the given connect cluster. + * * @param connectCluster The connect cluster * @return A list of connectors */ private List collectNs4KafkaConnectors(String connectCluster) { - List connectorList = connectorRepository.findAllForCluster(kafkaAsyncExecutorConfig.getName()) - .stream() - .filter(connector -> connector.getSpec().getConnectCluster().equals(connectCluster)) - .toList(); - log.debug("{} connectors found in Ns4kafka for Kafka Connect {} of Kafka cluster {}.", connectorList.size(), connectCluster, kafkaAsyncExecutorConfig.getName()); + List connectorList = connectorRepository.findAllForCluster(managedClusterProperties.getName()) + .stream() + .filter(connector -> connector.getSpec().getConnectCluster().equals(connectCluster)) + .toList(); + log.debug("{} connectors found in Ns4kafka for Kafka Connect {} of Kafka cluster {}.", connectorList.size(), + connectCluster, managedClusterProperties.getName()); return connectorList; } /** - * Check if both given connectors are equal + * Check if both given connectors are equal. 
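+ * <p>A minimal sketch of the entry-wise rule applied below (variable names are
+ * illustrative only, not part of this change):
+ * <pre>{@code
+ * // a null expected value only matches a null (or absent) actual value;
+ * // any other value must match with equals()
+ * boolean sameEntry = (expectedValue == null && actualValue == null)
+ *     || (expectedValue != null && expectedValue.equals(actualValue));
+ * }</pre>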
+ * * @param expected The first connector - * @param actual The second connector + * @param actual The second connector * @return true if they are, false otherwise */ private boolean connectorsAreSame(Connector expected, Connector actual) { @@ -227,21 +248,29 @@ private boolean connectorsAreSame(Connector expected, Connector actual) { } return expectedMap.entrySet() - .stream() - .allMatch(e -> (e.getValue() == null && actualMap.get(e.getKey()) == null) - || (e.getValue() != null && e.getValue().equals(actualMap.get(e.getKey())))); + .stream() + .allMatch(e -> (e.getValue() == null && actualMap.get(e.getKey()) == null) + || (e.getValue() != null && e.getValue().equals(actualMap.get(e.getKey())))); } /** - * Deploy a given connector to associated connect cluster + * Deploy a given connector to associated connect cluster. + * + * @param connector The connector to deploy */ private Mono deployConnector(Connector connector) { - return kafkaConnectClient.createOrUpdate(kafkaAsyncExecutorConfig.getName(), connector.getSpec().getConnectCluster(), - connector.getMetadata().getName(), ConnectorSpecs.builder().config(connector.getSpec().getConfig()).build()) - .doOnSuccess(httpResponse -> log.info("Success deploying connector {} on Kafka Connect {} of Kafka cluster {}.", - connector.getMetadata().getName(), connector.getSpec().getConnectCluster(), kafkaAsyncExecutorConfig.getName())) - .doOnError(httpError -> log.error("Error deploying connector {} on Kafka Connect {} of Kafka cluster {}: {}", - connector.getMetadata().getName(), connector.getSpec().getConnectCluster(), kafkaAsyncExecutorConfig.getName(), httpError.getMessage())); + return kafkaConnectClient.createOrUpdate(managedClusterProperties.getName(), + connector.getSpec().getConnectCluster(), + connector.getMetadata().getName(), ConnectorSpecs.builder() + .config(connector.getSpec().getConfig()) + .build()) + .doOnSuccess( + httpResponse -> log.info("Success deploying connector {} on Kafka Connect {} of Kafka cluster {}.", + connector.getMetadata().getName(), connector.getSpec().getConnectCluster(), + managedClusterProperties.getName())) + .doOnError( + httpError -> log.error("Error deploying connector {} on Kafka Connect {} of Kafka cluster {}: {}", + connector.getMetadata().getName(), connector.getSpec().getConnectCluster(), + managedClusterProperties.getName(), httpError.getMessage())); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java index a256de8a..04b2e41f 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/ConsumerGroupAsyncExecutor.java @@ -1,15 +1,8 @@ package com.michelin.ns4kafka.services.executors; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import io.micronaut.context.annotation.EachBean; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.ConsumerGroupDescription; -import org.apache.kafka.clients.admin.OffsetSpec; -import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.common.TopicPartition; - import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -17,145 +10,177 @@ import java.util.concurrent.ExecutionException; import 
java.util.function.Function; import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.ConsumerGroupDescription; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.TopicPartition; +/** + * Consumer group executor. + */ @Slf4j -@EachBean(KafkaAsyncExecutorConfig.class) +@EachBean(ManagedClusterProperties.class) @Singleton public class ConsumerGroupAsyncExecutor { - private final KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; + private final ManagedClusterProperties managedClusterProperties; - public ConsumerGroupAsyncExecutor(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { - this.kafkaAsyncExecutorConfig = kafkaAsyncExecutorConfig; + public ConsumerGroupAsyncExecutor(ManagedClusterProperties managedClusterProperties) { + this.managedClusterProperties = managedClusterProperties; } /** - * Getter for Kafka Admin client + * Getter for Kafka Admin client. + * * @return A Kafka Admin client instance */ private Admin getAdminClient() { - return kafkaAsyncExecutorConfig.getAdminClient(); + return managedClusterProperties.getAdminClient(); } - public Map describeConsumerGroups(List groupIds) throws ExecutionException, InterruptedException { + public Map describeConsumerGroups(List groupIds) + throws ExecutionException, InterruptedException { return getAdminClient().describeConsumerGroups(groupIds).all().get(); } + /** + * Alter the offsets of a given consumer group. + * + * @param consumerGroupId The consumer group + * @param preparedOffsets The offsets to set + * @throws InterruptedException Any interrupted exception during offsets alteration + * @throws ExecutionException Any execution exception during offsets alteration + */ public void alterConsumerGroupOffsets(String consumerGroupId, Map preparedOffsets) - throws InterruptedException, ExecutionException { + throws InterruptedException, ExecutionException { getAdminClient().alterConsumerGroupOffsets(consumerGroupId, - preparedOffsets.entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))) + preparedOffsets.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))) ).all().get(); - log.info("Consumer Group {} changed offset", consumerGroupId); + log.info("Consumer group {} changed offset", consumerGroupId); if (log.isDebugEnabled()) { - preparedOffsets.forEach((topicPartition, offset)-> log.debug("TopicPartition {} has the new offset {}", topicPartition, offset)); + preparedOffsets.forEach( + (topicPartition, offset) -> log.debug("Topic-Partition {} has the new offset {}", topicPartition, + offset)); } } /** * Find offsets matching the offset specs for given partition (e.g.: last offset for latest spec) + * * @param offsetsForTheSpec The offset specs * @return A map of topic-partition and offsets - * @throws ExecutionException Any execution exception during offsets description + * @throws ExecutionException Any execution exception during offsets description * @throws InterruptedException Any interrupted exception during offsets description */ - public Map listOffsets(Map offsetsForTheSpec) throws InterruptedException, ExecutionException { + public Map listOffsets(Map offsetsForTheSpec) + throws InterruptedException, ExecutionException { return getAdminClient().listOffsets(offsetsForTheSpec) - .all() - .get() - .entrySet() - .stream() - 
.collect(Collectors.toMap(Map.Entry::getKey, kv -> kv.getValue().offset())); + .all() + .get() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, kv -> kv.getValue().offset())); } /** - * Get all the committed offsets of a given consumer group + * Get all the committed offsets of a given consumer group. + * * @param groupId The consumer group * @return A map of topic-partition and committed offset number - * @throws ExecutionException Any execution exception during consumer groups description + * @throws ExecutionException Any execution exception during consumer groups description * @throws InterruptedException Any interrupted exception during consumer groups description */ - public Map getCommittedOffsets(String groupId) throws ExecutionException, InterruptedException { + public Map getCommittedOffsets(String groupId) + throws ExecutionException, InterruptedException { return getAdminClient().listConsumerGroupOffsets(groupId) - .partitionsToOffsetAndMetadata() - .get() - .entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset())); + .partitionsToOffsetAndMetadata() + .get() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset())); } /** - * Get the list of partitions of a given topic + * Get the list of partitions of a given topic. + * * @param topicName The topic name * @return A list of partitions - * @throws ExecutionException Any execution exception during topics description + * @throws ExecutionException Any execution exception during topics description * @throws InterruptedException Any interrupted exception during topics description */ public List getTopicPartitions(String topicName) throws ExecutionException, InterruptedException { return getAdminClient().describeTopics(Collections.singletonList(topicName)) - .all() - .get() - .get(topicName) - .partitions() - .stream() - .map(partitionInfo -> new TopicPartition(topicName, partitionInfo.partition())) - .toList(); + .all() + .get() + .get(topicName) + .partitions() + .stream() + .map(partitionInfo -> new TopicPartition(topicName, partitionInfo.partition())) + .toList(); } /** - * Get earliest offsets for given list of topic-partitions + * Get earliest offsets for given list of topic-partitions. + * * @param partitionsToReset The topic-partitions list * @return A map of topic-partition and offsets - * @throws ExecutionException Any execution exception during offsets description + * @throws ExecutionException Any execution exception during offsets description * @throws InterruptedException Any interrupted exception during offsets description */ - public Map getLogStartOffsets(List partitionsToReset) throws ExecutionException, InterruptedException { + public Map getLogStartOffsets(List partitionsToReset) + throws ExecutionException, InterruptedException { Map startOffsets = partitionsToReset - .stream() - .collect(Collectors.toMap(Function.identity(), v -> OffsetSpec.earliest())); + .stream() + .collect(Collectors.toMap(Function.identity(), v -> OffsetSpec.earliest())); return listOffsets(startOffsets); } /** - * Get latest offsets for given list of topic-partitions + * Get latest offsets for given list of topic-partitions. 
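+ * <p>For illustration, a hedged sketch of how these helpers could combine to
+ * reset a group to the end of a topic ({@code "orders"}, {@code "my-group"}
+ * and the {@code executor} instance are hypothetical, not part of this change):
+ * <pre>{@code
+ * // checked exceptions omitted for brevity
+ * List<TopicPartition> partitions = executor.getTopicPartitions("orders");
+ * Map<TopicPartition, Long> endOffsets = executor.getLogEndOffsets(partitions);
+ * executor.alterConsumerGroupOffsets("my-group", endOffsets);
+ * }</pre>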
+ * * @param partitionsToReset The topic-partitions list * @return A map of topic-partition and offsets - * @throws ExecutionException Any execution exception during offsets description + * @throws ExecutionException Any execution exception during offsets description * @throws InterruptedException Any interrupted exception during offsets description */ - public Map getLogEndOffsets(List partitionsToReset) throws ExecutionException, InterruptedException { + public Map getLogEndOffsets(List partitionsToReset) + throws ExecutionException, InterruptedException { Map endOffsets = partitionsToReset - .stream() - .collect(Collectors.toMap(Function.identity(), v -> OffsetSpec.latest())); + .stream() + .collect(Collectors.toMap(Function.identity(), v -> OffsetSpec.latest())); return listOffsets(endOffsets); } /** - * Get offsets from timestamp for given list of topic-partitions + * Get offsets from timestamp for given list of topic-partitions. + * * @param partitionsToReset The topic-partitions list - * @param timestamp The timestamp used to filter + * @param timestamp The timestamp used to filter * @return A map of topic-partition and offsets - * @throws ExecutionException Any execution exception during offsets description + * @throws ExecutionException Any execution exception during offsets description * @throws InterruptedException Any interrupted exception during offsets description */ - public Map getLogTimestampOffsets(List partitionsToReset, long timestamp) throws ExecutionException, InterruptedException { + public Map getLogTimestampOffsets(List partitionsToReset, long timestamp) + throws ExecutionException, InterruptedException { Map dateOffsets = partitionsToReset - .stream() - .collect(Collectors.toMap(Function.identity(), e -> OffsetSpec.forTimestamp(timestamp))); + .stream() + .collect(Collectors.toMap(Function.identity(), e -> OffsetSpec.forTimestamp(timestamp))); // list offsets for this timestamp Map offsets = listOffsets(dateOffsets); // extract successful offsets (>= 0) Map successfulLogTimestampOffsets = offsets.entrySet().stream() - .filter(e -> e.getValue() >= 0) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .filter(e -> e.getValue() >= 0) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); // extract failed offsets partitions (== -1) List unsuccessfulPartitions = offsets.entrySet().stream() - .filter(e -> e.getValue() == -1L) - .map(Map.Entry::getKey) - .toList(); + .filter(e -> e.getValue() == -1L) + .map(Map.Entry::getKey) + .toList(); // reprocess failed offsets to OffsetSpec.latest() Map reprocessedUnsuccessfulOffsets = getLogEndOffsets(unsuccessfulPartitions); @@ -165,13 +190,15 @@ public Map getLogTimestampOffsets(List par } /** - * Check if given offsets for topic-partitions are properly between earliest and latest offsets + * Check if given offsets for topic-partitions are properly between earliest and latest offsets. 
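+ * <p>A sketch of the clamping rule with made-up numbers, assuming a partition
+ * whose log currently spans offsets 10 to 100:
+ * <pre>{@code
+ * // requested   5 -> raised to 10   (below the log start offset)
+ * // requested 150 -> lowered to 100 (beyond the log end offset)
+ * // requested  42 -> kept at 42     (already inside the boundaries)
+ * }</pre>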
+ * * @param requestedOffsets The offsets to check for topic-partitions * @return A map of topic-partition and offsets with no unbound offsets - * @throws ExecutionException Any execution exception during offsets description + * @throws ExecutionException Any execution exception during offsets description * @throws InterruptedException Any interrupted exception during offsets description */ - public Map checkOffsetsRange(Map requestedOffsets) throws ExecutionException, InterruptedException { + public Map checkOffsetsRange(Map requestedOffsets) + throws ExecutionException, InterruptedException { // lower bound Map logStartOffsets = getLogStartOffsets(new ArrayList<>(requestedOffsets.keySet())); // upper bound @@ -179,17 +206,17 @@ public Map checkOffsetsRange(Map req // replace inside boundaries if required return requestedOffsets.entrySet().stream() - .map(entry -> { - if (entry.getValue() > logEndOffsets.get(entry.getKey())) { - // went too much forward - return Map.entry(entry.getKey(), logEndOffsets.get(entry.getKey())); - } else if (entry.getValue() < logStartOffsets.get(entry.getKey())) { - // went too much backward - return Map.entry(entry.getKey(), logStartOffsets.get(entry.getKey())); - } else { - return entry; - } - }) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .map(entry -> { + if (entry.getValue() > logEndOffsets.get(entry.getKey())) { + // went too much forward + return Map.entry(entry.getKey(), logEndOffsets.get(entry.getKey())); + } else if (entry.getValue() < logStartOffsets.get(entry.getKey())) { + // went too much backward + return Map.entry(entry.getKey(), logStartOffsets.get(entry.getKey())); + } else { + return entry; + } + }) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorScheduler.java b/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorScheduler.java index 96d72907..c387d963 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorScheduler.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/KafkaAsyncExecutorScheduler.java @@ -1,50 +1,46 @@ package com.michelin.ns4kafka.services.executors; -import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorInfo; import io.micronaut.runtime.event.ApplicationStartupEvent; import io.micronaut.runtime.event.annotation.EventListener; import io.micronaut.scheduling.annotation.Scheduled; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - import java.time.Duration; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; +import lombok.extern.slf4j.Slf4j; +import reactor.core.publisher.Flux; +/** + * Schedule the asynchronous executors. + */ @Slf4j @Singleton public class KafkaAsyncExecutorScheduler { + private final AtomicBoolean ready = new AtomicBoolean(false); @Inject List topicAsyncExecutors; - @Inject List accessControlEntryAsyncExecutors; - @Inject List connectorAsyncExecutors; - @Inject List userAsyncExecutors; - private final AtomicBoolean ready = new AtomicBoolean(false); - /** - * Register when the application is ready + * Register when the application is ready. 
+ * + * @param event The application start event */ @EventListener public void onStartupEvent(ApplicationStartupEvent event) { - ready.compareAndSet(false,true); + ready.compareAndSet(false, true); scheduleConnectHealthCheck(); scheduleConnectorSynchronization(); } /** - * Schedule resource synchronization + * Schedule resource synchronization. */ @Scheduled(initialDelay = "12s", fixedDelay = "20s") public void schedule() { @@ -58,26 +54,32 @@ public void schedule() { } /** - * Schedule connector synchronization + * Schedule connector synchronization. */ public void scheduleConnectorSynchronization() { Flux.interval(Duration.ofSeconds(12), Duration.ofSeconds(30)) - .onBackpressureDrop(onDropped -> log.debug("Skipping next connector synchronization. The previous one is still running.")) - .concatMap(mapper -> Flux.fromIterable(connectorAsyncExecutors) - .flatMap(ConnectorAsyncExecutor::run)) - .onErrorContinue((error, body) -> log.trace("Continue connector synchronization after error: " + error.getMessage() + ".")) - .subscribe(connectorInfo -> log.trace("Synchronization completed for connector \"" + connectorInfo.name() + "\".")); + .onBackpressureDrop( + onDropped -> log.debug("Skipping next connector synchronization. The previous one is still running.")) + .concatMap(mapper -> Flux.fromIterable(connectorAsyncExecutors) + .flatMap(ConnectorAsyncExecutor::run)) + .onErrorContinue((error, body) -> log.trace( + "Continue connector synchronization after error: " + error.getMessage() + ".")) + .subscribe(connectorInfo -> log.trace( + "Synchronization completed for connector \"" + connectorInfo.name() + "\".")); } /** - * Schedule connector synchronization + * Schedule the Connect cluster health check. */ public void scheduleConnectHealthCheck() { Flux.interval(Duration.ofSeconds(5), Duration.ofMinutes(1)) - .onBackpressureDrop(onDropped -> log.debug("Skipping next Connect cluster health check. The previous one is still running.")) - .concatMap(mapper -> Flux.fromIterable(connectorAsyncExecutors) - .flatMap(ConnectorAsyncExecutor::runHealthCheck)) - .onErrorContinue((error, body) -> log.trace("Continue Connect cluster health check after error: " + error.getMessage() + ".")) - .subscribe(connectCluster -> log.trace("Health check completed for Connect cluster \"" + connectCluster.getMetadata().getName() + "\".")); + .onBackpressureDrop(onDropped -> log.debug( + "Skipping next Connect cluster health check. 
The previous one is still running.")) + .concatMap(mapper -> Flux.fromIterable(connectorAsyncExecutors) + .flatMap(ConnectorAsyncExecutor::runHealthCheck)) + .onErrorContinue((error, body) -> log.trace( + "Continue Connect cluster health check after error: " + error.getMessage() + ".")) + .subscribe(connectCluster -> log.trace( + "Health check completed for Connect cluster \"" + connectCluster.getMetadata().getName() + "\".")); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java index 64593d24..c0afee25 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java +++ b/src/main/java/com/michelin/ns4kafka/services/executors/TopicAsyncExecutor.java @@ -1,94 +1,117 @@ package com.michelin.ns4kafka.services.executors; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.repositories.TopicRepository; import com.michelin.ns4kafka.repositories.kafka.KafkaStoreException; import io.micronaut.context.annotation.EachBean; import jakarta.inject.Inject; import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.clients.admin.*; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.config.ConfigResource; - import java.time.Instant; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Function; import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AlterConfigOp; +import org.apache.kafka.clients.admin.AlterConfigsResult; +import org.apache.kafka.clients.admin.ConfigEntry; +import org.apache.kafka.clients.admin.CreateTopicsResult; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.clients.admin.RecordsToDelete; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.admin.TopicListing; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.ConfigResource; +/** + * Topic executor. + */ @Slf4j -@EachBean(KafkaAsyncExecutorConfig.class) +@EachBean(ManagedClusterProperties.class) @Singleton public class TopicAsyncExecutor { - private final KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; + private final ManagedClusterProperties managedClusterProperties; @Inject TopicRepository topicRepository; - public TopicAsyncExecutor(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { - this.kafkaAsyncExecutorConfig = kafkaAsyncExecutorConfig; + public TopicAsyncExecutor(ManagedClusterProperties managedClusterProperties) { + this.managedClusterProperties = managedClusterProperties; } - private Admin getAdminClient(){ - return kafkaAsyncExecutorConfig.getAdminClient(); + private Admin getAdminClient() { + return managedClusterProperties.getAdminClient(); } /** - * Start topic synchronization + * Run the topic synchronization. 
*/ public void run() { - if (this.kafkaAsyncExecutorConfig.isManageTopics()) { + if (this.managedClusterProperties.isManageTopics()) { synchronizeTopics(); } } /** - * Start the synchronization of topics + * Start the topic synchronization. */ public void synchronizeTopics() { - log.debug("Starting topic collection for cluster {}", kafkaAsyncExecutorConfig.getName()); + log.debug("Starting topic collection for cluster {}", managedClusterProperties.getName()); try { Map brokerTopics = collectBrokerTopics(); - List ns4kafkaTopics = topicRepository.findAllForCluster(kafkaAsyncExecutorConfig.getName()); + List ns4kafkaTopics = topicRepository.findAllForCluster(managedClusterProperties.getName()); List toCreate = ns4kafkaTopics.stream() - .filter(topic -> !brokerTopics.containsKey(topic.getMetadata().getName())) - .toList(); + .filter(topic -> !brokerTopics.containsKey(topic.getMetadata().getName())) + .toList(); List toCheckConf = ns4kafkaTopics.stream() - .filter(topic -> brokerTopics.containsKey(topic.getMetadata().getName())) - .toList(); + .filter(topic -> brokerTopics.containsKey(topic.getMetadata().getName())) + .toList(); Map> toUpdate = toCheckConf.stream() - .map(topic -> { - Map actualConf = brokerTopics.get(topic.getMetadata().getName()).getSpec().getConfigs(); - Map expectedConf = topic.getSpec().getConfigs() == null ? Map.of() : topic.getSpec().getConfigs(); - Collection topicConfigChanges = computeConfigChanges(expectedConf,actualConf); - if (!topicConfigChanges.isEmpty()) { - ConfigResource cr = new ConfigResource(ConfigResource.Type.TOPIC, topic.getMetadata().getName()); - return Map.entry(cr,topicConfigChanges); - } - return null; - }) - .filter(Objects::nonNull) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .map(topic -> { + Map actualConf = + brokerTopics.get(topic.getMetadata().getName()).getSpec().getConfigs(); + Map expectedConf = + topic.getSpec().getConfigs() == null ? Map.of() : topic.getSpec().getConfigs(); + Collection topicConfigChanges = computeConfigChanges(expectedConf, actualConf); + if (!topicConfigChanges.isEmpty()) { + ConfigResource cr = + new ConfigResource(ConfigResource.Type.TOPIC, topic.getMetadata().getName()); + return Map.entry(cr, topicConfigChanges); + } + return null; + }) + .filter(Objects::nonNull) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); if (!toCreate.isEmpty()) { - log.debug("Topic(s) to create: " + String.join("," , toCreate.stream().map(topic -> topic.getMetadata().getName()).toList())); + log.debug("Topic(s) to create: " + + String.join(",", toCreate.stream().map(topic -> topic.getMetadata().getName()).toList())); } if (!toUpdate.isEmpty()) { - log.debug("Topic(s) to update: " + String.join("," , toUpdate.keySet().stream().map(ConfigResource::name).toList())); + log.debug("Topic(s) to update: " + + String.join(",", toUpdate.keySet().stream().map(ConfigResource::name).toList())); for (Map.Entry> e : toUpdate.entrySet()) { for (AlterConfigOp op : e.getValue()) { - log.debug(e.getKey().name() + " " + op.opType().toString() + " " + op.configEntry().name() + "(" + op.configEntry().value() + ")"); + log.debug( + e.getKey().name() + " " + op.opType().toString() + " " + op.configEntry().name() + "(" + + op.configEntry().value() + ")"); } } } @@ -104,16 +127,19 @@ public void synchronizeTopics() { } /** - * Delete a topic + * Delete a topic. 
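+ * <p>Hypothetical usage (the instance name is assumed); as the implementation
+ * below shows, the call blocks for up to 30 seconds waiting for the
+ * broker-side deletion:
+ * <pre>{@code
+ * topicAsyncExecutor.deleteTopic(topic);
+ * }</pre>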
+ * * @param topic The topic to delete */ public void deleteTopic(Topic topic) throws InterruptedException, ExecutionException, TimeoutException { getAdminClient().deleteTopics(List.of(topic.getMetadata().getName())).all().get(30, TimeUnit.SECONDS); - log.info("Success deleting topic {} on {}", topic.getMetadata().getName(), this.kafkaAsyncExecutorConfig.getName()); + log.info("Success deleting topic {} on {}", topic.getMetadata().getName(), + this.managedClusterProperties.getName()); } /** - * Collect all topics on broker + * Collect all topics on broker. + * * @return All topics by name */ public Map collectBrokerTopics() throws ExecutionException, InterruptedException, TimeoutException { @@ -121,73 +147,88 @@ public Map collectBrokerTopics() throws ExecutionException, Inter } /** - * List all topic names on broker + * List all topic names on broker. + * * @return All topic names */ public List listBrokerTopicNames() throws InterruptedException, ExecutionException, TimeoutException { return getAdminClient().listTopics().listings() - .get(30, TimeUnit.SECONDS) - .stream() - .map(TopicListing::name) - .toList(); + .get(30, TimeUnit.SECONDS) + .stream() + .map(TopicListing::name) + .toList(); } - public Map collectBrokerTopicsFromNames(List topicNames) throws InterruptedException, ExecutionException, TimeoutException { - Map topicDescriptions = getAdminClient().describeTopics(topicNames).all().get(); - // Create a Map> for all topics - // includes only Dynamic config properties + /** + * Collect all topics on broker from a list of topic names. + * + * @param topicNames The topic names + * @return All topics by name + * @throws InterruptedException Any interrupted exception + * @throws ExecutionException Any execution exception + * @throws TimeoutException Any timeout exception + */ + public Map collectBrokerTopicsFromNames(List topicNames) + throws InterruptedException, ExecutionException, TimeoutException { + Map topicDescriptions = getAdminClient().describeTopics(topicNames) + .allTopicNames().get(); + return getAdminClient() - .describeConfigs(topicNames.stream() - .map(s -> new ConfigResource(ConfigResource.Type.TOPIC, s)) - .toList()) - .all() - .get(30, TimeUnit.SECONDS) - .entrySet() - .stream() - .collect(Collectors.toMap( - configResourceConfigEntry -> configResourceConfigEntry.getKey().name(), - configResourceConfigEntry -> configResourceConfigEntry.getValue().entries() - .stream() - .filter( configEntry -> configEntry.source() == ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG) - .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value)) - )) - .entrySet() - .stream() - .map(stringMapEntry -> Topic.builder() - .metadata(ObjectMeta.builder() - .cluster(kafkaAsyncExecutorConfig.getName()) - .name(stringMapEntry.getKey()) - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(topicDescriptions.get(stringMapEntry.getKey()).partitions().get(0).replicas().size()) - .partitions(topicDescriptions.get(stringMapEntry.getKey()).partitions().size()) - .configs(stringMapEntry.getValue()) - .build()) - .build() - ) - .collect(Collectors.toMap( topic -> topic.getMetadata().getName(), Function.identity())); + .describeConfigs(topicNames.stream() + .map(s -> new ConfigResource(ConfigResource.Type.TOPIC, s)) + .toList()) + .all() + .get(30, TimeUnit.SECONDS) + .entrySet() + .stream() + .collect(Collectors.toMap( + configResourceConfigEntry -> configResourceConfigEntry.getKey().name(), + configResourceConfigEntry -> configResourceConfigEntry.getValue().entries() + .stream() + 
.filter(configEntry -> configEntry.source() == ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG) + .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value)) + )) + .entrySet() + .stream() + .map(stringMapEntry -> Topic.builder() + .metadata(ObjectMeta.builder() + .cluster(managedClusterProperties.getName()) + .name(stringMapEntry.getKey()) + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor( + topicDescriptions.get(stringMapEntry.getKey()).partitions().get(0).replicas().size()) + .partitions(topicDescriptions.get(stringMapEntry.getKey()).partitions().size()) + .configs(stringMapEntry.getValue()) + .build()) + .build() + ) + .collect(Collectors.toMap(topic -> topic.getMetadata().getName(), Function.identity())); } private void alterTopics(Map> toUpdate, List topics) { AlterConfigsResult alterConfigsResult = getAdminClient().incrementalAlterConfigs(toUpdate); alterConfigsResult.values().forEach((key, value) -> { - Topic updatedTopic = topics.stream().filter(t -> t.getMetadata().getName().equals(key.name())).findFirst().get(); + Topic updatedTopic = + topics.stream().filter(t -> t.getMetadata().getName().equals(key.name())).findFirst().get(); try { value.get(10, TimeUnit.SECONDS); - Collection ops = toUpdate.get(key); updatedTopic.getMetadata().setCreationTimestamp(Date.from(Instant.now())); updatedTopic.getMetadata().setGeneration(updatedTopic.getMetadata().getGeneration() + 1); updatedTopic.setStatus(Topic.TopicStatus.ofSuccess("Topic configs updated")); + log.info("Success updating topic configs {} on {}: [{}]", - key.name(), - kafkaAsyncExecutorConfig.getName(), - ops.stream().map(AlterConfigOp::toString).collect(Collectors.joining(","))); + key.name(), + managedClusterProperties.getName(), + toUpdate.get(key).stream().map(AlterConfigOp::toString).collect(Collectors.joining(","))); } catch (InterruptedException e) { log.error("Error", e); Thread.currentThread().interrupt(); } catch (Exception e) { - updatedTopic.setStatus(Topic.TopicStatus.ofFailed("Error while updating topic configs: " + e.getMessage())); - log.error(String.format("Error while updating topic configs %s on %s", key.name(), this.kafkaAsyncExecutorConfig.getName()), e); + updatedTopic.setStatus( + Topic.TopicStatus.ofFailed("Error while updating topic configs: " + e.getMessage())); + log.error(String.format("Error while updating topic configs %s on %s", key.name(), + this.managedClusterProperties.getName()), e); } topicRepository.create(updatedTopic); }); @@ -195,14 +236,15 @@ private void alterTopics(Map> toUpdate private void createTopics(List topics) { List newTopics = topics.stream() - .map(topic -> { - log.debug("Creating topic {} on {}",topic.getMetadata().getName(),topic.getMetadata().getCluster()); - NewTopic newTopic = new NewTopic(topic.getMetadata().getName(),topic.getSpec().getPartitions(), (short) topic.getSpec().getReplicationFactor()); - newTopic.configs(topic.getSpec().getConfigs()); - log.debug("{}",newTopic); - return newTopic; - }) - .toList(); + .map(topic -> { + log.debug("Creating topic {} on {}", topic.getMetadata().getName(), topic.getMetadata().getCluster()); + NewTopic newTopic = new NewTopic(topic.getMetadata().getName(), topic.getSpec().getPartitions(), + (short) topic.getSpec().getReplicationFactor()); + newTopic.configs(topic.getSpec().getConfigs()); + log.debug("{}", newTopic); + return newTopic; + }) + .toList(); CreateTopicsResult createTopicsResult = getAdminClient().createTopics(newTopics); createTopicsResult.values().forEach((key, value) -> { @@ -212,43 +254,48 @@ 
private void createTopics(List topics) { createdTopic.getMetadata().setCreationTimestamp(Date.from(Instant.now())); createdTopic.getMetadata().setGeneration(1); createdTopic.setStatus(Topic.TopicStatus.ofSuccess("Topic created")); - log.info("Success creating topic {} on {}", key, this.kafkaAsyncExecutorConfig.getName()); + log.info("Success creating topic {} on {}", key, this.managedClusterProperties.getName()); } catch (InterruptedException e) { log.error("Error", e); Thread.currentThread().interrupt(); } catch (Exception e) { createdTopic.setStatus(Topic.TopicStatus.ofFailed("Error while creating topic: " + e.getMessage())); - log.error(String.format("Error while creating topic %s on %s", key, this.kafkaAsyncExecutorConfig.getName()), e); + log.error( + String.format("Error while creating topic %s on %s", key, this.managedClusterProperties.getName()), + e); } topicRepository.create(createdTopic); }); } - - private Collection computeConfigChanges(Map expected, Map actual){ + + private Collection computeConfigChanges(Map expected, Map actual) { List toCreate = expected.entrySet() - .stream() - .filter(expectedEntry -> !actual.containsKey(expectedEntry.getKey())) - .map(expectedEntry -> new AlterConfigOp(new ConfigEntry(expectedEntry.getKey(),expectedEntry.getValue()), AlterConfigOp.OpType.SET)) - .toList(); + .stream() + .filter(expectedEntry -> !actual.containsKey(expectedEntry.getKey())) + .map(expectedEntry -> new AlterConfigOp(new ConfigEntry(expectedEntry.getKey(), expectedEntry.getValue()), + AlterConfigOp.OpType.SET)) + .toList(); List toDelete = actual.entrySet() - .stream() - .filter(actualEntry -> !expected.containsKey(actualEntry.getKey())) - .map(expectedEntry -> new AlterConfigOp(new ConfigEntry(expectedEntry.getKey(),expectedEntry.getValue()), AlterConfigOp.OpType.DELETE)) - .toList(); + .stream() + .filter(actualEntry -> !expected.containsKey(actualEntry.getKey())) + .map(expectedEntry -> new AlterConfigOp(new ConfigEntry(expectedEntry.getKey(), expectedEntry.getValue()), + AlterConfigOp.OpType.DELETE)) + .toList(); List toChange = expected.entrySet() - .stream() - .filter(expectedEntry -> { - if (actual.containsKey(expectedEntry.getKey())) { - String actualVal = actual.get(expectedEntry.getKey()); - String expectedVal = expectedEntry.getValue(); - return !expectedVal.equals(actualVal); - } - return false; - }) - .map(expectedEntry -> new AlterConfigOp(new ConfigEntry(expectedEntry.getKey(),expectedEntry.getValue()), AlterConfigOp.OpType.SET)) - .toList(); + .stream() + .filter(expectedEntry -> { + if (actual.containsKey(expectedEntry.getKey())) { + String actualVal = actual.get(expectedEntry.getKey()); + String expectedVal = expectedEntry.getValue(); + return !expectedVal.equals(actualVal); + } + return false; + }) + .map(expectedEntry -> new AlterConfigOp(new ConfigEntry(expectedEntry.getKey(), expectedEntry.getValue()), + AlterConfigOp.OpType.SET)) + .toList(); List total = new ArrayList<>(); total.addAll(toCreate); @@ -260,15 +307,18 @@ private Collection computeConfigChanges(Map expect /** * For a given topic, get each latest offset by partition in order to delete all the records - * before these offsets + * before these offsets. 
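+ * <p>A sketch of how this pairs with {@code deleteRecords} to truncate every
+ * partition of a topic ({@code "orders"} and {@code executor} are hypothetical
+ * names, not part of this change):
+ * <pre>{@code
+ * // checked exceptions omitted for brevity
+ * Map<TopicPartition, RecordsToDelete> plan = executor.prepareRecordsToDelete("orders");
+ * Map<TopicPartition, Long> lowWatermarks = executor.deleteRecords(plan);
+ * }</pre>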
+ * * @param topic The topic to delete records * @return A map of offsets by topic-partitions - * @throws ExecutionException Any execution exception + * @throws ExecutionException Any execution exception * @throws InterruptedException Any interrupted exception */ - public Map prepareRecordsToDelete(String topic) throws ExecutionException, InterruptedException { + public Map prepareRecordsToDelete(String topic) + throws ExecutionException, InterruptedException { // List all partitions for topic and prepare a listOffsets call - Map topicsPartitionsToDelete = getAdminClient().describeTopics(List.of(topic)).all().get() + Map topicsPartitionsToDelete = + getAdminClient().describeTopics(List.of(topic)).allTopicNames().get() .entrySet() .stream() .flatMap(topicDescriptionEntry -> topicDescriptionEntry.getValue().partitions().stream()) @@ -277,36 +327,39 @@ public Map prepareRecordsToDelete(String topic) // list all latest offsets for each partitions return getAdminClient().listOffsets(topicsPartitionsToDelete).all().get() - .entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, kv -> RecordsToDelete.beforeOffset(kv.getValue().offset()))); + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, kv -> RecordsToDelete.beforeOffset(kv.getValue().offset()))); } /** - * Delete the records for each partition, before each offset + * Delete the records for each partition, before each offset. + * * @param recordsToDelete The offsets by topic-partitions * @return The new offsets by topic-partitions * @throws InterruptedException Any interrupted exception */ - public Map deleteRecords(Map recordsToDelete) throws InterruptedException { + public Map deleteRecords(Map recordsToDelete) + throws InterruptedException { return getAdminClient().deleteRecords(recordsToDelete).lowWatermarks().entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, kv-> { - try { - var newValue = kv.getValue().get().lowWatermark(); - log.info("Deleting records {} of topic-partition {}", newValue, kv.getKey()); - return newValue; - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - log.error(String.format("Thread interrupted deleting records of topic-partition %s", kv.getKey()), e); - return -1L; - } catch (ExecutionException e) { - log.error(String.format("Execution error deleting records of topic-partition %s", kv.getKey()), e); - return -1L; - } catch (Exception e) { - log.error(String.format("Error deleting records of topic-partition %s", kv.getKey()), e); - return -1L; - } - })); + .collect(Collectors.toMap(Map.Entry::getKey, kv -> { + try { + var newValue = kv.getValue().get().lowWatermark(); + log.info("Deleting records {} of topic-partition {}", newValue, kv.getKey()); + return newValue; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.error(String.format("Thread interrupted deleting records of topic-partition %s", kv.getKey()), + e); + return -1L; + } catch (ExecutionException e) { + log.error(String.format("Execution error deleting records of topic-partition %s", kv.getKey()), e); + return -1L; + } catch (Exception e) { + log.error(String.format("Error deleting records of topic-partition %s", kv.getKey()), e); + return -1L; + } + })); } } diff --git a/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java b/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java index 6a290c4a..119c736e 100644 --- a/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java +++ 
b/src/main/java/com/michelin/ns4kafka/services/executors/UserAsyncExecutor.java @@ -1,13 +1,22 @@ package com.michelin.ns4kafka.services.executors; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.models.quota.ResourceQuota; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.repositories.NamespaceRepository; import com.michelin.ns4kafka.repositories.ResourceQuotaRepository; import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException; import io.micronaut.context.annotation.EachBean; import jakarta.inject.Inject; import jakarta.inject.Singleton; +import java.security.SecureRandom; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.ScramCredentialInfo; @@ -18,20 +27,18 @@ import org.apache.kafka.common.quota.ClientQuotaFilter; import org.apache.kafka.common.quota.ClientQuotaFilterComponent; -import java.security.SecureRandom; -import java.util.*; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - +/** + * User executor. + */ @Slf4j -@EachBean(KafkaAsyncExecutorConfig.class) +@EachBean(ManagedClusterProperties.class) @Singleton public class UserAsyncExecutor { public static final double BYTE_RATE_DEFAULT_VALUE = 102400.0; private static final String USER_QUOTA_PREFIX = "user/"; - private final KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig; + private final ManagedClusterProperties managedClusterProperties; private final AbstractUserSynchronizer userExecutor; @@ -41,28 +48,36 @@ public class UserAsyncExecutor { @Inject ResourceQuotaRepository quotaRepository; - public UserAsyncExecutor(KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig) { - this.kafkaAsyncExecutorConfig = kafkaAsyncExecutorConfig; - switch (kafkaAsyncExecutorConfig.getProvider()) { - case SELF_MANAGED: - this.userExecutor = new Scram512UserSynchronizer(kafkaAsyncExecutorConfig.getAdminClient()); - break; - case CONFLUENT_CLOUD: - default: - this.userExecutor = new UnimplementedUserSynchronizer(); - break; + /** + * Constructor. + * + * @param managedClusterProperties The managed cluster properties + */ + public UserAsyncExecutor(ManagedClusterProperties managedClusterProperties) { + this.managedClusterProperties = managedClusterProperties; + if (Objects.requireNonNull(managedClusterProperties.getProvider()) + == ManagedClusterProperties.KafkaProvider.SELF_MANAGED) { + this.userExecutor = new Scram512UserSynchronizer(managedClusterProperties.getAdminClient()); + } else { + this.userExecutor = new UnimplementedUserSynchronizer(); } } + /** + * Run the user synchronization. + */ public void run() { - if (this.kafkaAsyncExecutorConfig.isManageUsers() && this.userExecutor.canSynchronizeQuotas()) { + if (this.managedClusterProperties.isManageUsers() && this.userExecutor.canSynchronizeQuotas()) { synchronizeUsers(); } } + /** + * Start the user synchronization. 
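+ * <p>The reconciliation is a three-way diff between the quotas on the broker
+ * and the quotas declared in Ns4Kafka; a sketch with hypothetical user names:
+ * <pre>{@code
+ * // declared: {user-a, user-b}    broker: {user-b, user-c}
+ * // toCreate = {user-a}  (declared but absent from the broker)
+ * // toDelete = {user-c}  (on the broker but no longer declared)
+ * // toUpdate = {user-b}  when its declared quota values differ
+ * }</pre>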
+ */ public void synchronizeUsers() { - log.debug("Starting user collection for cluster {}", kafkaAsyncExecutorConfig.getName()); + log.debug("Starting user collection for cluster {}", managedClusterProperties.getName()); // List user details from broker Map> brokerUserQuotas = this.userExecutor.listQuotas(); // List user details from ns4kafka @@ -70,18 +85,19 @@ public void synchronizeUsers() { // Compute toCreate, toDelete, and toUpdate lists Map> toCreate = ns4kafkaUserQuotas.entrySet() - .stream() - .filter(entry -> !brokerUserQuotas.containsKey(entry.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .stream() + .filter(entry -> !brokerUserQuotas.containsKey(entry.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); Map> toDelete = brokerUserQuotas.entrySet() - .stream() - .filter(entry -> !ns4kafkaUserQuotas.containsKey(entry.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .stream() + .filter(entry -> !ns4kafkaUserQuotas.containsKey(entry.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); Map> toUpdate = ns4kafkaUserQuotas.entrySet() - .stream() - .filter(entry -> brokerUserQuotas.containsKey(entry.getKey())) - .filter(entry -> !entry.getValue().isEmpty() && !entry.getValue().equals(brokerUserQuotas.get(entry.getKey()))) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .stream() + .filter(entry -> brokerUserQuotas.containsKey(entry.getKey())) + .filter( + entry -> !entry.getValue().isEmpty() && !entry.getValue().equals(brokerUserQuotas.get(entry.getKey()))) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); if (log.isDebugEnabled()) { log.debug("UserQuotas to create : " + String.join(", ", toCreate.keySet())); @@ -93,34 +109,40 @@ public void synchronizeUsers() { createUserQuotas(toUpdate); } + /** + * Reset the password of a given user. 
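+ * <p>Only self-managed clusters (SCRAM-SHA-512 credentials) support this; any
+ * other provider fails with the validation error shown below. Illustrative
+ * call (instance and user names assumed):
+ * <pre>{@code
+ * String newPassword = userAsyncExecutor.resetPassword("my-kafka-user");
+ * }</pre>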
+ * + * @param user The user + * @return The new password + */ public String resetPassword(String user) { if (this.userExecutor.canResetPassword()) { return this.userExecutor.resetPassword(user); } else { throw new ResourceValidationException( - List.of("Password reset is not available with provider " + kafkaAsyncExecutorConfig.getProvider()), - "KafkaUserResetPassword", - user); + List.of("Password reset is not available with provider " + managedClusterProperties.getProvider()), + "KafkaUserResetPassword", + user); } } private Map> collectNs4kafkaQuotas() { - return namespaceRepository.findAllForCluster(this.kafkaAsyncExecutorConfig.getName()) - .stream() - .map(namespace -> { - Optional quota = quotaRepository.findForNamespace(namespace.getMetadata().getName()); - Map userQuota = new HashMap<>(); - - quota.ifPresent(resourceQuota -> resourceQuota.getSpec().entrySet() - .stream() - .filter(q -> q.getKey().startsWith(USER_QUOTA_PREFIX)) - .forEach(q -> userQuota.put( - q.getKey().replaceAll(USER_QUOTA_PREFIX, ""), - Double.parseDouble(q.getValue())))); - - return Map.entry(namespace.getSpec().getKafkaUser(), userQuota); - }) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + return namespaceRepository.findAllForCluster(this.managedClusterProperties.getName()) + .stream() + .map(namespace -> { + Optional quota = quotaRepository.findForNamespace(namespace.getMetadata().getName()); + Map userQuota = new HashMap<>(); + + quota.ifPresent(resourceQuota -> resourceQuota.getSpec().entrySet() + .stream() + .filter(q -> q.getKey().startsWith(USER_QUOTA_PREFIX)) + .forEach(q -> userQuota.put( + q.getKey().replaceAll(USER_QUOTA_PREFIX, ""), + Double.parseDouble(q.getValue())))); + + return Map.entry(namespace.getSpec().getKafkaUser(), userQuota); + }) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } private void createUserQuotas(Map> toCreate) { @@ -141,10 +163,9 @@ interface AbstractUserSynchronizer { static class Scram512UserSynchronizer implements AbstractUserSynchronizer { - private Admin admin; - private final ScramCredentialInfo info = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_512, 4096); private final SecureRandom secureRandom = new SecureRandom(); + private Admin admin; public Scram512UserSynchronizer(Admin admin) { this.admin = admin; @@ -181,13 +202,14 @@ public String resetPassword(String user) { @Override public Map> listQuotas() { - ClientQuotaFilter filter = ClientQuotaFilter.containsOnly(List.of(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER))); + ClientQuotaFilter filter = ClientQuotaFilter.containsOnly( + List.of(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER))); try { return admin.describeClientQuotas(filter).entities().get(10, TimeUnit.SECONDS) - .entrySet() - .stream() - .map(entry -> Map.entry(entry.getKey().entries().get(ClientQuotaEntity.USER), entry.getValue())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .entrySet() + .stream() + .map(entry -> Map.entry(entry.getKey().entries().get(ClientQuotaEntity.USER), entry.getValue())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } catch (InterruptedException e) { log.error("Error", e); Thread.currentThread().interrupt(); @@ -201,9 +223,12 @@ public Map> listQuotas() { @Override public void applyQuotas(String user, Map quotas) { ClientQuotaEntity client = new ClientQuotaEntity(Map.of("user", user)); - ClientQuotaAlteration.Op producerQuota = new ClientQuotaAlteration.Op("producer_byte_rate", 
quotas.getOrDefault("producer_byte_rate", BYTE_RATE_DEFAULT_VALUE)); - ClientQuotaAlteration.Op consumerQuota = new ClientQuotaAlteration.Op("consumer_byte_rate", quotas.getOrDefault("consumer_byte_rate", BYTE_RATE_DEFAULT_VALUE)); - ClientQuotaAlteration clientQuota = new ClientQuotaAlteration(client, List.of(producerQuota, consumerQuota)); + ClientQuotaAlteration.Op producerQuota = new ClientQuotaAlteration.Op("producer_byte_rate", + quotas.getOrDefault("producer_byte_rate", BYTE_RATE_DEFAULT_VALUE)); + ClientQuotaAlteration.Op consumerQuota = new ClientQuotaAlteration.Op("consumer_byte_rate", + quotas.getOrDefault("consumer_byte_rate", BYTE_RATE_DEFAULT_VALUE)); + ClientQuotaAlteration clientQuota = + new ClientQuotaAlteration(client, List.of(producerQuota, consumerQuota)); try { admin.alterClientQuotas(List.of(clientQuota)).all().get(10, TimeUnit.SECONDS); log.info("Success applying quotas {} for user {}", clientQuota.ops(), user); @@ -220,7 +245,8 @@ public void applyQuotas(String user, Map quotas) { static class UnimplementedUserSynchronizer implements AbstractUserSynchronizer { - private final UnsupportedOperationException exception = new UnsupportedOperationException("This cluster provider doesn't support User operations."); + private final UnsupportedOperationException exception = + new UnsupportedOperationException("This cluster provider doesn't support User operations."); @Override public boolean canSynchronizeQuotas() { diff --git a/src/main/java/com/michelin/ns4kafka/utils/BytesUtils.java b/src/main/java/com/michelin/ns4kafka/utils/BytesUtils.java index 4c802d25..d0f78e2e 100644 --- a/src/main/java/com/michelin/ns4kafka/utils/BytesUtils.java +++ b/src/main/java/com/michelin/ns4kafka/utils/BytesUtils.java @@ -2,7 +2,13 @@ import java.math.BigDecimal; import java.math.RoundingMode; +import lombok.AccessLevel; +import lombok.NoArgsConstructor; +/** + * BytesUtils is a utility class to convert bytes to human-readable values and vice-versa. + */ +@NoArgsConstructor(access = AccessLevel.PRIVATE) public class BytesUtils { public static final String BYTE = "B"; public static final String KIBIBYTE = "KiB"; @@ -10,7 +16,8 @@ public class BytesUtils { public static final String GIBIBYTE = "GiB"; /** - * Converts given bytes to either kibibyte, mebibite or gibibyte + * Converts given bytes to either kibibyte, mebibite or gibibyte. + * * @param bytes The bytes to convert * @return The converted value as human-readable value */ @@ -35,7 +42,8 @@ public static String bytesToHumanReadable(long bytes) { } /** - * Converts given human-readable measure to bytes + * Converts given human-readable measure to bytes. 
+ * * @param quota The measure to convert * @return The converted value as bytes */ @@ -46,24 +54,22 @@ public static long humanReadableToBytes(String quota) { if (quota.endsWith(KIBIBYTE)) { return BigDecimal.valueOf(Double.parseDouble(quota.replace(KIBIBYTE, "")) * kibibyte) - .setScale(0, RoundingMode.CEILING) - .longValue(); + .setScale(0, RoundingMode.CEILING) + .longValue(); } if (quota.endsWith(MEBIBYTE)) { return BigDecimal.valueOf(Double.parseDouble(quota.replace(MEBIBYTE, "")) * mebibyte) - .setScale(0, RoundingMode.CEILING) - .longValue(); + .setScale(0, RoundingMode.CEILING) + .longValue(); } if (quota.endsWith(GIBIBYTE)) { return BigDecimal.valueOf(Double.parseDouble(quota.replace(GIBIBYTE, "")) * gibibyte) - .setScale(0, RoundingMode.CEILING) - .longValue(); + .setScale(0, RoundingMode.CEILING) + .longValue(); } return Long.parseLong(quota.replace(BYTE, "")); } - - private BytesUtils() {} } diff --git a/src/main/java/com/michelin/ns4kafka/utils/EncryptionUtils.java b/src/main/java/com/michelin/ns4kafka/utils/EncryptionUtils.java index b8067f93..682cbc91 100644 --- a/src/main/java/com/michelin/ns4kafka/utils/EncryptionUtils.java +++ b/src/main/java/com/michelin/ns4kafka/utils/EncryptionUtils.java @@ -1,18 +1,14 @@ package com.michelin.ns4kafka.utils; -import com.nimbusds.jose.*; +import com.nimbusds.jose.EncryptionMethod; +import com.nimbusds.jose.JOSEException; +import com.nimbusds.jose.JWEAlgorithm; +import com.nimbusds.jose.JWECryptoParts; +import com.nimbusds.jose.JWEHeader; import com.nimbusds.jose.crypto.AESDecrypter; import com.nimbusds.jose.crypto.AESEncrypter; import com.nimbusds.jose.util.Base64URL; import io.micronaut.core.util.StringUtils; -import lombok.extern.slf4j.Slf4j; - -import javax.crypto.Cipher; -import javax.crypto.SecretKey; -import javax.crypto.SecretKeyFactory; -import javax.crypto.spec.GCMParameterSpec; -import javax.crypto.spec.PBEKeySpec; -import javax.crypto.spec.SecretKeySpec; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; @@ -22,10 +18,22 @@ import java.security.spec.InvalidKeySpecException; import java.util.Arrays; import java.util.Base64; +import javax.crypto.Cipher; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.GCMParameterSpec; +import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; +import lombok.AccessLevel; +import lombok.NoArgsConstructor; +import lombok.extern.slf4j.Slf4j; +/** + * Encryption utils. + */ @Slf4j +@NoArgsConstructor(access = AccessLevel.PRIVATE) public class EncryptionUtils { - /** * The AES encryption algorithm. */ @@ -47,26 +55,21 @@ public class EncryptionUtils { private static final String NS4KAFKA_PREFIX = "NS4K"; /** - * Constructor - */ - private EncryptionUtils() { - } - - /** - * Encrypt given text with the given key to AES256 GCM then encode it to Base64 + * Encrypt given text with the given key to AES256 GCM then encode it to Base64. 
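// A hedged usage sketch for the BytesUtils helpers in this file. The numeric expectations
// follow from the 1024-based units and the CEILING rounding shown above; the exact
// human-readable output format (decimals, no space before the unit) is an assumption.
class BytesUtilsSketch {
    public static void main(String[] args) {
        System.out.println(BytesUtils.humanReadableToBytes("100MiB")); // 104857600
        System.out.println(BytesUtils.humanReadableToBytes("1.5KiB")); // 1536, rounded up
        System.out.println(BytesUtils.humanReadableToBytes("42B"));    // 42
        System.out.println(BytesUtils.bytesToHumanReadable(104857600L)); // e.g. "100.0MiB"
    }
}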
* * @param clearText The text to encrypt * @param key The key encryption key (KEK) * @return The encrypted password */ - public static String encryptAES256GCM(String clearText, String key) { + public static String encryptAes256Gcm(String clearText, String key) { try { if (!StringUtils.hasText(clearText)) { return clearText; } AESEncrypter encrypter = new AESEncrypter(key.getBytes(StandardCharsets.UTF_8)); - JWECryptoParts encryptedData = encrypter.encrypt(new JWEHeader(JWEAlgorithm.A256KW, EncryptionMethod.A256GCM), + JWECryptoParts encryptedData = + encrypter.encrypt(new JWEHeader(JWEAlgorithm.A256KW, EncryptionMethod.A256GCM), clearText.getBytes(StandardCharsets.UTF_8)); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); @@ -84,13 +87,13 @@ public static String encryptAES256GCM(String clearText, String key) { } /** - * Decrypt given text with the given key from AES256 GCM + * Decrypt given text with the given key from AES256 GCM. * * @param encryptedText The text to decrypt * @param key The key encryption key (KEK) * @return The decrypted text */ - public static String decryptAES256GCM(String encryptedText, String key) { + public static String decryptAes256Gcm(String encryptedText, String key) { try { if (!StringUtils.hasText(encryptedText)) { return encryptedText; @@ -105,7 +108,7 @@ public static String decryptAES256GCM(String encryptedText, String key) { Base64URL text = Base64URL.encode(Arrays.copyOfRange(encryptedData, 68, encryptedData.length)); byte[] clearTextAsBytes = decrypter.decrypt(new JWEHeader(JWEAlgorithm.A256KW, EncryptionMethod.A256GCM), - encryptedKey, iv, text, auth); + encryptedKey, iv, text, auth); return new String(clearTextAsBytes); } catch (JOSEException e) { @@ -123,23 +126,23 @@ public static String decryptAES256GCM(String encryptedText, String key) { * @param salt The encryption salt. * @return The encrypted password. */ - public static String encryptAESWithPrefix(final String clearText, final String key, final String salt) { + public static String encryptAesWithPrefix(final String clearText, final String key, final String salt) { if (!StringUtils.hasText(clearText)) { return clearText; } try { - final SecretKey secret = getAESSecretKey(key, salt); - final byte[] iv = getRandomIV(); + final SecretKey secret = getAesSecretKey(key, salt); + final byte[] iv = getRandomIv(); final var cipher = Cipher.getInstance(ENCRYPT_ALGO); cipher.init(Cipher.ENCRYPT_MODE, secret, new GCMParameterSpec(TAG_LENGTH_BIT, iv)); final byte[] cipherText = cipher.doFinal(clearText.getBytes(StandardCharsets.UTF_8)); final byte[] prefix = NS4KAFKA_PREFIX.getBytes(StandardCharsets.UTF_8); final byte[] cipherTextWithIv = ByteBuffer.allocate(prefix.length + iv.length + cipherText.length) - .put(prefix) - .put(iv) - .put(cipherText) - .array(); + .put(prefix) + .put(iv) + .put(cipherText) + .array(); return Base64.getEncoder().encodeToString(cipherTextWithIv); } catch (Exception e) { log.error("An error occurred during Connect cluster AES256 string encryption", e); @@ -156,7 +159,7 @@ public static String encryptAESWithPrefix(final String clearText, final String k * @param salt The encryption salt. * @return The encrypted password. 
*/ - public static String decryptAESWithPrefix(final String encryptedText, final String key, final String salt) { + public static String decryptAesWithPrefix(final String encryptedText, final String key, final String salt) { if (!StringUtils.hasText(encryptedText)) { return encryptedText; } @@ -172,7 +175,7 @@ public static String decryptAESWithPrefix(final String encryptedText, final Stri byteBuffer.get(cipherText); // decrypt the cipher text. - final SecretKey secret = getAESSecretKey(key, salt); + final SecretKey secret = getAesSecretKey(key, salt); final var cipher = Cipher.getInstance(ENCRYPT_ALGO); cipher.init(Cipher.DECRYPT_MODE, secret, new GCMParameterSpec(TAG_LENGTH_BIT, iv)); return new String(cipher.doFinal(cipherText), StandardCharsets.UTF_8); @@ -185,7 +188,7 @@ public static String decryptAESWithPrefix(final String encryptedText, final Stri /** - * Gets the secret key derived AES 256 bits key + * Gets the secret key derived AES 256 bits key. * * @param key The encryption key * @param salt The encryption salt @@ -193,8 +196,8 @@ public static String decryptAESWithPrefix(final String encryptedText, final Stri * @throws NoSuchAlgorithmException No such algorithm exception. * @throws InvalidKeySpecException Invalid key spec exception. */ - private static SecretKey getAESSecretKey(final String key, final String salt) - throws NoSuchAlgorithmException, InvalidKeySpecException { + private static SecretKey getAesSecretKey(final String key, final String salt) + throws NoSuchAlgorithmException, InvalidKeySpecException { var factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256"); var spec = new PBEKeySpec(key.toCharArray(), salt.getBytes(StandardCharsets.UTF_8), 65536, 256); return new SecretKeySpec(factory.generateSecret(spec).getEncoded(), "AES"); @@ -205,7 +208,7 @@ private static SecretKey getAESSecretKey(final String key, final String salt) * * @return The random IV. */ - private static byte[] getRandomIV() { + private static byte[] getRandomIv() { final byte[] iv = new byte[IV_LENGTH_BYTE]; new SecureRandom().nextBytes(iv); return iv; diff --git a/src/main/java/com/michelin/ns4kafka/utils/ValidationErrorUtils.java b/src/main/java/com/michelin/ns4kafka/utils/ValidationErrorUtils.java index c5afe4f6..27611061 100644 --- a/src/main/java/com/michelin/ns4kafka/utils/ValidationErrorUtils.java +++ b/src/main/java/com/michelin/ns4kafka/utils/ValidationErrorUtils.java @@ -1,7 +1,12 @@ package com.michelin.ns4kafka.utils; +import lombok.AccessLevel; +import lombok.NoArgsConstructor; + +/** + * Validation error utils. + */ +@NoArgsConstructor(access = AccessLevel.PRIVATE) public class ValidationErrorUtils { public static final String INVALID_VALUE = "Invalid value "; - - private ValidationErrorUtils() { } } diff --git a/src/main/java/com/michelin/ns4kafka/utils/config/ConnectorConfig.java b/src/main/java/com/michelin/ns4kafka/utils/config/ConnectorConfig.java index da6d34cc..6105fd79 100644 --- a/src/main/java/com/michelin/ns4kafka/utils/config/ConnectorConfig.java +++ b/src/main/java/com/michelin/ns4kafka/utils/config/ConnectorConfig.java @@ -1,7 +1,12 @@ package com.michelin.ns4kafka.utils.config; +import lombok.AccessLevel; +import lombok.NoArgsConstructor; + +/** + * Connector configuration. 
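// A hedged round-trip sketch for the EncryptionUtils methods renamed above. The key and salt
// are placeholders; a 32-character key yields the 256-bit AES key that A256KW expects.
class EncryptionUtilsSketch {
    public static void main(String[] args) {
        String key = "changeitchangeitchangeitchangeit"; // 32 chars = 256 bits
        String encrypted = EncryptionUtils.encryptAes256Gcm("my-secret", key);
        System.out.println(EncryptionUtils.decryptAes256Gcm(encrypted, key)); // my-secret

        // The *WithPrefix variants derive the key with PBKDF2 and prepend the "NS4K" marker
        String salt = "some-salt";
        String prefixed = EncryptionUtils.encryptAesWithPrefix("my-secret", key, salt);
        System.out.println(EncryptionUtils.decryptAesWithPrefix(prefixed, key, salt)); // my-secret
    }
}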
+ */ +@NoArgsConstructor(access = AccessLevel.PRIVATE) public final class ConnectorConfig { public static final String CONNECTOR_CLASS = "connector.class"; - - private ConnectorConfig() { } } diff --git a/src/main/java/com/michelin/ns4kafka/utils/config/TopicConfig.java b/src/main/java/com/michelin/ns4kafka/utils/config/TopicConfig.java index db9181d4..839a34b4 100644 --- a/src/main/java/com/michelin/ns4kafka/utils/config/TopicConfig.java +++ b/src/main/java/com/michelin/ns4kafka/utils/config/TopicConfig.java @@ -1,8 +1,13 @@ package com.michelin.ns4kafka.utils.config; +import lombok.AccessLevel; +import lombok.NoArgsConstructor; + +/** + * Topic configuration. + */ +@NoArgsConstructor(access = AccessLevel.PRIVATE) public final class TopicConfig { public static final String PARTITIONS = "partitions"; public static final String REPLICATION_FACTOR = "replication.factor"; - - private TopicConfig() { } } diff --git a/src/main/java/com/michelin/ns4kafka/utils/enums/ApplyStatus.java b/src/main/java/com/michelin/ns4kafka/utils/enums/ApplyStatus.java index 3f24f633..ead2563e 100644 --- a/src/main/java/com/michelin/ns4kafka/utils/enums/ApplyStatus.java +++ b/src/main/java/com/michelin/ns4kafka/utils/enums/ApplyStatus.java @@ -1,5 +1,8 @@ package com.michelin.ns4kafka.utils.enums; +/** + * ApplyStatus is used to track the status of a resource during an operation. + */ public enum ApplyStatus { created, changed, unchanged, deleted } diff --git a/src/main/java/com/michelin/ns4kafka/utils/exceptions/ResourceValidationException.java b/src/main/java/com/michelin/ns4kafka/utils/exceptions/ResourceValidationException.java index a9a7eeb6..efd0757a 100644 --- a/src/main/java/com/michelin/ns4kafka/utils/exceptions/ResourceValidationException.java +++ b/src/main/java/com/michelin/ns4kafka/utils/exceptions/ResourceValidationException.java @@ -1,23 +1,22 @@ package com.michelin.ns4kafka.utils.exceptions; -import lombok.Getter; - +import java.io.Serial; import java.util.List; +import lombok.AllArgsConstructor; +import lombok.Getter; - +/** + * Resource validation exception. 
+ */ +@Getter +@AllArgsConstructor public class ResourceValidationException extends RuntimeException { + @Serial private static final long serialVersionUID = 32400191899153204L; - @Getter + private final List<String> validationErrors; - @Getter private final String kind; - @Getter - private final String name; - public ResourceValidationException(List<String> validationErrors, String kind, String name) { - this.validationErrors = validationErrors; - this.kind = kind; - this.name = name; - } + private final String name; } diff --git a/src/main/java/com/michelin/ns4kafka/validation/ConnectValidator.java b/src/main/java/com/michelin/ns4kafka/validation/ConnectValidator.java index b9c61f4d..fbcfeced 100644 --- a/src/main/java/com/michelin/ns4kafka/validation/ConnectValidator.java +++ b/src/main/java/com/michelin/ns4kafka/validation/ConnectValidator.java @@ -1,27 +1,29 @@ package com.michelin.ns4kafka.validation; +import static com.michelin.ns4kafka.utils.ValidationErrorUtils.INVALID_VALUE; +import static com.michelin.ns4kafka.utils.config.ConnectorConfig.CONNECTOR_CLASS; + import com.fasterxml.jackson.annotation.JsonSetter; import com.fasterxml.jackson.annotation.Nulls; import com.michelin.ns4kafka.models.connector.Connector; import io.micronaut.core.util.StringUtils; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import lombok.Builder; import lombok.Data; import lombok.EqualsAndHashCode; import lombok.NoArgsConstructor; import lombok.experimental.SuperBuilder; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static com.michelin.ns4kafka.utils.ValidationErrorUtils.INVALID_VALUE; -import static com.michelin.ns4kafka.utils.config.ConnectorConfig.CONNECTOR_CLASS; - +/** + * Validator for connectors. + */ @Data @SuperBuilder @NoArgsConstructor -@EqualsAndHashCode(callSuper=true) +@EqualsAndHashCode(callSuper = true) public class ConnectValidator extends ResourceValidator { @Builder.Default @JsonSetter(nulls = Nulls.AS_EMPTY) @@ -36,8 +38,42 @@ public class ConnectValidator extends ResourceValidator { private Map<String, Map<String, Validator>> classValidationConstraints = new HashMap<>(); /** - * Validate a given connector - * @param connector The connector + * Make a default ConnectValidator. + * + * @return A ConnectValidator + */ + public static ConnectValidator makeDefault() { + return ConnectValidator.builder() + .validationConstraints(Map.of( + "key.converter", new ResourceValidator.NonEmptyString(), + "value.converter", new ResourceValidator.NonEmptyString(), + CONNECTOR_CLASS, new ResourceValidator.ValidString( + List.of("io.confluent.connect.jdbc.JdbcSourceConnector", + "io.confluent.connect.jdbc.JdbcSinkConnector", + "com.splunk.kafka.connect.SplunkSinkConnector", + "org.apache.kafka.connect.file.FileStreamSinkConnector"), + false + ) + )) + .sourceValidationConstraints(Map.of( + "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString() + )) + .sinkValidationConstraints(Map.of( + "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString() + )) + .classValidationConstraints(Map.of( + "io.confluent.connect.jdbc.JdbcSinkConnector", + Map.of( + "db.timezone", new ResourceValidator.NonEmptyString() + ) + )) + .build(); + } + + /** + * Validate a given connector.
+ * + * @param connector The connector * @param connectorType The connector type * @return A list of validation errors */ @@ -49,12 +85,14 @@ public List validate(Connector connector, String connectorType) { } if (connector.getMetadata().getName().length() > 249) { - validationErrors.add(INVALID_VALUE + connector.getMetadata().getName() + " for name: Value must not be longer than 249"); + validationErrors.add( + INVALID_VALUE + connector.getMetadata().getName() + " for name: Value must not be longer than 249"); } if (!connector.getMetadata().getName().matches("[a-zA-Z0-9._-]+")) { - validationErrors.add(INVALID_VALUE + connector.getMetadata().getName() + " for name: Value must only contain " + - "ASCII alphanumerics, '.', '_' or '-'"); + validationErrors.add( + INVALID_VALUE + connector.getMetadata().getName() + " for name: Value must only contain " + + "ASCII alphanumerics, '.', '_' or '-'"); } validationConstraints.forEach((key, value) -> { @@ -86,42 +124,15 @@ public List validate(Connector connector, String connectorType) { } if (classValidationConstraints.containsKey(connector.getSpec().getConfig().get(CONNECTOR_CLASS))) { - classValidationConstraints.get(connector.getSpec().getConfig().get(CONNECTOR_CLASS)).forEach((key, value) -> { - try { - value.ensureValid(key, connector.getSpec().getConfig().get(key)); - } catch (FieldValidationException e) { - validationErrors.add(e.getMessage()); - } - }); + classValidationConstraints.get(connector.getSpec().getConfig().get(CONNECTOR_CLASS)) + .forEach((key, value) -> { + try { + value.ensureValid(key, connector.getSpec().getConfig().get(key)); + } catch (FieldValidationException e) { + validationErrors.add(e.getMessage()); + } + }); } return validationErrors; } - - public static ConnectValidator makeDefault() { - return ConnectValidator.builder() - .validationConstraints(Map.of( - "key.converter", new ResourceValidator.NonEmptyString(), - "value.converter", new ResourceValidator.NonEmptyString(), - CONNECTOR_CLASS, new ResourceValidator.ValidString( - List.of("io.confluent.connect.jdbc.JdbcSourceConnector", - "io.confluent.connect.jdbc.JdbcSinkConnector", - "com.splunk.kafka.connect.SplunkSinkConnector", - "org.apache.kafka.connect.file.FileStreamSinkConnector"), - false - ) - )) - .sourceValidationConstraints(Map.of( - "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString() - )) - .sinkValidationConstraints(Map.of( - "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString() - )) - .classValidationConstraints(Map.of( - "io.confluent.connect.jdbc.JdbcSinkConnector", - Map.of( - "db.timezone", new ResourceValidator.NonEmptyString() - ) - )) - .build(); - } } diff --git a/src/main/java/com/michelin/ns4kafka/validation/FieldValidationException.java b/src/main/java/com/michelin/ns4kafka/validation/FieldValidationException.java index f477e663..430e3a13 100644 --- a/src/main/java/com/michelin/ns4kafka/validation/FieldValidationException.java +++ b/src/main/java/com/michelin/ns4kafka/validation/FieldValidationException.java @@ -1,6 +1,12 @@ package com.michelin.ns4kafka.validation; +import java.io.Serial; + +/** + * Field validation exception. 
+ */ public class FieldValidationException extends RuntimeException { + @Serial private static final long serialVersionUID = 6223587833587267232L; public FieldValidationException(String name, Object value, String message) { diff --git a/src/main/java/com/michelin/ns4kafka/validation/ResourceValidator.java b/src/main/java/com/michelin/ns4kafka/validation/ResourceValidator.java index f91a8fa8..a304cf9c 100644 --- a/src/main/java/com/michelin/ns4kafka/validation/ResourceValidator.java +++ b/src/main/java/com/michelin/ns4kafka/validation/ResourceValidator.java @@ -4,14 +4,20 @@ import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; import com.fasterxml.jackson.annotation.Nulls; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; import lombok.experimental.SuperBuilder; -import java.util.*; - +/** + * Resource validator. + */ @Data @SuperBuilder @AllArgsConstructor @@ -19,22 +25,26 @@ public abstract class ResourceValidator { @Builder.Default @JsonSetter(nulls = Nulls.AS_EMPTY) - protected Map validationConstraints = new HashMap<>(); + protected Map validationConstraints = new HashMap<>(); + /** + * Validate the configuration. + */ @JsonTypeInfo( - use = JsonTypeInfo.Id.NAME, - property = "validation-type") + use = JsonTypeInfo.Id.NAME, + property = "validation-type") @JsonSubTypes({ - @JsonSubTypes.Type(value = Range.class, name = "Range"), - @JsonSubTypes.Type(value = ValidList.class, name = "ValidList"), - @JsonSubTypes.Type(value = ValidString.class, name = "ValidString"), - @JsonSubTypes.Type(value = NonEmptyString.class, name = "NonEmptyString"), - @JsonSubTypes.Type(value = CompositeValidator.class, name = "CompositeValidator") + @JsonSubTypes.Type(value = Range.class, name = "Range"), + @JsonSubTypes.Type(value = ValidList.class, name = "ValidList"), + @JsonSubTypes.Type(value = ValidString.class, name = "ValidString"), + @JsonSubTypes.Type(value = NonEmptyString.class, name = "NonEmptyString"), + @JsonSubTypes.Type(value = CompositeValidator.class, name = "CompositeValidator") }) public interface Validator { /** * Perform single configuration validation. - * @param name The name of the configuration + * + * @param name The name of the configuration * @param value The value of the configuration * @throws FieldValidationException if the value is invalid. */ @@ -42,7 +52,7 @@ public interface Validator { } /** - * Validation logic for numeric ranges + * Validation logic for numeric ranges. */ @Data @NoArgsConstructor @@ -52,9 +62,10 @@ public static class Range implements ResourceValidator.Validator { private boolean optional = false; /** - * A numeric range with inclusive upper bound and inclusive lower bound - * @param min the lower bound - * @param max the upper bound + * A numeric range with inclusive upper bound and inclusive lower bound. + * + * @param min the lower bound + * @param max the upper bound */ public Range(Number min, Number max, boolean optional) { this.min = min; @@ -63,7 +74,7 @@ public Range(Number min, Number max, boolean optional) { } /** - * A numeric range that checks only the lower bound + * A numeric range that checks only the lower bound. 
* * @param min The minimum acceptable value */ @@ -72,78 +83,100 @@ public static ResourceValidator.Range atLeast(Number min) { } /** - * A numeric range that checks both the upper (inclusive) and lower bound + * A numeric range that checks both the upper (inclusive) and lower bound. */ public static ResourceValidator.Range between(Number min, Number max) { return new ResourceValidator.Range(min, max, false); } + /** - * A numeric range that checks both the upper (inclusive) and lower bound, and accepts null as well + * A numeric range that checks both the upper (inclusive) and lower bound, and accepts null as well. */ public static ResourceValidator.Range optionalBetween(Number min, Number max) { return new ResourceValidator.Range(min, max, true); } + /** + * Ensure that the value is in the range. + * + * @param name The name of the configuration + * @param o The value of the configuration + */ public void ensureValid(String name, Object o) { - Number n = null; + Number n; if (o == null) { - if (optional) + if (optional) { return; + } throw new FieldValidationException(name, null, "Value must be non-null"); } try { n = Double.valueOf(o.toString()); - }catch (NumberFormatException e){ - throw new FieldValidationException(name,o.toString(),"Value must be a Number"); + } catch (NumberFormatException e) { + throw new FieldValidationException(name, o.toString(), "Value must be a Number"); } - if (min != null && n.doubleValue() < min.doubleValue()) + if (min != null && n.doubleValue() < min.doubleValue()) { throw new FieldValidationException(name, o, "Value must be at least " + min); - if (max != null && n.doubleValue() > max.doubleValue()) + } + if (max != null && n.doubleValue() > max.doubleValue()) { throw new FieldValidationException(name, o, "Value must be no more than " + max); + } } + /** + * Return a string representation of the range. + * + * @return A string representation of the range + */ public String toString() { - if (min == null && max == null) + if (min == null && max == null) { return "[...]"; - else if (min == null) + } else if (min == null) { return "[...," + max + "]"; - else if (max == null) + } else if (max == null) { return "[" + min + ",...]"; - else + } else { return "[" + min + ",...," + max + "]"; + } } } + /** + * Validation logic for a list of valid strings. + */ @Data @NoArgsConstructor + @AllArgsConstructor public static class ValidList implements ResourceValidator.Validator { - private List validStrings; private boolean optional = false; - public ValidList(List validStrings, boolean optional) { - this.validStrings = validStrings; - this.optional = optional; - } - public static ResourceValidator.ValidList in(String... validStrings) { return new ResourceValidator.ValidList(Arrays.asList(validStrings), false); } + public static ResourceValidator.ValidList optionalIn(String... validStrings) { return new ResourceValidator.ValidList(Arrays.asList(validStrings), true); } + /** + * Ensure that the value is one of the valid strings. 
+ * + * @param name The name of the configuration + * @param o The value of the configuration + */ @Override public void ensureValid(final String name, final Object o) { if (o == null) { - if (optional) + if (optional) { return; + } throw new FieldValidationException(name, null, "Value must be non-null"); } - String s = (String)o; + String s = (String) o; List<String> values = List.of(s); //default if no "," (most of the time) - if(s.contains(",")){ + if (s.contains(",")) { //split and strip values = Arrays.stream(s.split(",")).map(String::strip).toList(); } @@ -154,61 +187,92 @@ public void ensureValid(final String name, final Object o) { } } + /** + * Return a string representation of the valid strings. + * + * @return A string representation of the valid strings + */ public String toString() { return validStrings.toString(); } } + /** + * Validation logic for a valid string. + */ @Data @NoArgsConstructor + @AllArgsConstructor public static class ValidString implements ResourceValidator.Validator { private List<String> validStrings; private boolean optional = false; - - public ValidString(List<String> validStrings, boolean optional) { - this.validStrings = validStrings; - this.optional = optional; - } - public static ResourceValidator.ValidString in(String... validStrings) { return new ResourceValidator.ValidString(Arrays.asList(validStrings), false); } + public static ResourceValidator.ValidString optionalIn(String... validStrings) { return new ResourceValidator.ValidString(Arrays.asList(validStrings), true); } + /** + * Ensure that the value is one of the valid strings. + * + * @param name The name of the configuration + * @param o The value of the configuration + */ @Override public void ensureValid(String name, Object o) { if (o == null) { - if (optional) + if (optional) { return; + } throw new FieldValidationException(name, null, "Value must be non-null"); } String s = (String) o; if (!validStrings.contains(s)) { - throw new FieldValidationException(name, o, "String must be one of: " + String.join(", ", validStrings)); + throw new FieldValidationException(name, o, + "String must be one of: " + String.join(", ", validStrings)); } } + /** + * Return a string representation of the valid strings. + * + * @return A string representation of the valid strings + */ public String toString() { return "[" + String.join(", ", validStrings) + "]"; } } + /** + * Validation logic for a non-empty string. + */ public static class NonEmptyString implements ResourceValidator.Validator { - + /** + * Ensure that the value is non-empty. + * + * @param name The name of the configuration + * @param o The value of the configuration + */ @Override public void ensureValid(String name, Object o) { - if (o == null) + if (o == null) { throw new FieldValidationException(name, null, "Value must be non-null"); + } String s = (String) o; - if (s != null && s.isEmpty()) { + if (s.isEmpty()) { throw new FieldValidationException(name, o, "String must be non-empty"); } } + /** + * Return a string representation of the valid strings.
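// A hedged sketch of how these validators are exercised. Range parses the value as a number
// and checks inclusive bounds; ValidList splits comma-separated input; failures surface as
// FieldValidationException (the exact message wording below is indicative only).
class ValidatorSketch {
    public static void main(String[] args) {
        ResourceValidator.Range partitions = ResourceValidator.Range.between(3, 6);
        partitions.ensureValid("partitions", "4"); // ok, "4" parses to 4.0

        ResourceValidator.ValidList cleanupPolicy =
            ResourceValidator.ValidList.in("delete", "compact");
        cleanupPolicy.ensureValid("cleanup.policy", "delete,compact"); // ok, split on ','

        try {
            partitions.ensureValid("partitions", 7); // above the inclusive upper bound
        } catch (FieldValidationException e) {
            System.out.println(e.getMessage()); // mentions "Value must be no more than 6"
        }
    }
}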
+ * + * @return A string representation of the valid strings + */ @Override public String toString() { return "non-empty string"; @@ -216,10 +280,13 @@ public String toString() { @Override public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (!(obj instanceof NonEmptyString)) return false; - return true; + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + return obj instanceof NonEmptyString; } @Override @@ -228,6 +295,9 @@ public int hashCode() { } } + /** + * Validation logic for a composite validator. + */ @Data @NoArgsConstructor public static class CompositeValidator implements ResourceValidator.Validator { @@ -243,161 +313,24 @@ public static ResourceValidator.CompositeValidator of(ResourceValidator.Validato @Override public void ensureValid(String name, Object value) { - for (ResourceValidator.Validator validator: validators) { + for (ResourceValidator.Validator validator : validators) { validator.ensureValid(name, value); } } @Override public String toString() { - if (validators == null) return ""; - StringBuilder desc = new StringBuilder(); - for (ResourceValidator.Validator v: validators) { - if (desc.length() > 0) { - desc.append(',').append(' '); - } - desc.append(String.valueOf(v)); + if (validators == null) { + return ""; } - return desc.toString(); - } - } - /* - public static class CaseInsensitiveValidString implements ResourceValidator.Validator { - - Set validStrings; - - private CaseInsensitiveValidString(List validStrings) { - this.validStrings = validStrings.stream() - .map(s -> s.toUpperCase(Locale.ROOT)) - .collect(Collectors.toSet()); - } - - public static ResourceValidator.CaseInsensitiveValidString in(String... validStrings) { - return new ResourceValidator.CaseInsensitiveValidString(Arrays.asList(validStrings)); - } - - @Override - public void ensureValid(String name, Object o) { - String s = (String) o; - if (s == null || !validStrings.contains(s.toUpperCase(Locale.ROOT))) { - throw new ValidationException(name, o, "String must be one of (case insensitive): " + String.join(", ", validStrings)); - } - } - - public String toString() { - return "(case insensitive) [" + String.join(", ", validStrings) + "]"; - } - } - - public static class NonNullValidator implements ResourceValidator.Validator { - @Override - public void ensureValid(String name, Object value) { - if (value == null) { - // Pass in the string null to avoid the spotbugs warning - throw new ValidationException(name, "null", "entry must be non null"); - } - } - - public String toString() { - return "non-null string"; - } - } - - public static class LambdaValidator implements ResourceValidator.Validator { - BiConsumer ensureValid; - Supplier toStringFunction; - - private LambdaValidator(BiConsumer ensureValid, - Supplier toStringFunction) { - this.ensureValid = ensureValid; - this.toStringFunction = toStringFunction; - } - - public static ResourceValidator.LambdaValidator with(BiConsumer ensureValid, - Supplier toStringFunction) { - return new ResourceValidator.LambdaValidator(ensureValid, toStringFunction); - } - - @Override - public void ensureValid(String name, Object value) { - ensureValid.accept(name, value); - } - - @Override - public String toString() { - return toStringFunction.get(); - } - } - - public static class CompositeValidator implements ResourceValidator.Validator { - private final List validators; - - private CompositeValidator(List validators) { - this.validators = 
Collections.unmodifiableList(validators); - } - - public static ResourceValidator.CompositeValidator of(ResourceValidator.Validator... validators) { - return new ResourceValidator.CompositeValidator(Arrays.asList(validators)); - } - - @Override - public void ensureValid(String name, Object value) { - for (ResourceValidator.Validator validator: validators) { - validator.ensureValid(name, value); - } - } - - @Override - public String toString() { - if (validators == null) return ""; StringBuilder desc = new StringBuilder(); - for (ResourceValidator.Validator v: validators) { + for (ResourceValidator.Validator v : validators) { if (desc.length() > 0) { desc.append(',').append(' '); } - desc.append(String.valueOf(v)); + desc.append(v); } return desc.toString(); } } - - public static class NonEmptyStringWithoutControlChars implements ResourceValidator.Validator { - - public static ResourceValidator.NonEmptyStringWithoutControlChars nonEmptyStringWithoutControlChars() { - return new ResourceValidator.NonEmptyStringWithoutControlChars(); - } - - @Override - public void ensureValid(String name, Object value) { - String s = (String) value; - - if (s == null) { - // This can happen during creation of the config object due to no default value being defined for the - // name configuration - a missing name parameter is caught when checking for mandatory parameters, - // thus we can ok a null value here - return; - } else if (s.isEmpty()) { - throw new ValidationException(name, value, "String may not be empty"); - } - - // Check name string for illegal characters - ArrayList foundIllegalCharacters = new ArrayList<>(); - - for (int i = 0; i < s.length(); i++) { - if (Character.isISOControl(s.codePointAt(i))) { - foundIllegalCharacters.add(String.valueOf(s.codePointAt(i))); - } - } - - if (!foundIllegalCharacters.isEmpty()) { - throw new ValidationException(name, value, "String may not contain control sequences but had the following ASCII chars: " + String.join(", ", foundIllegalCharacters)); - } - } - - public String toString() { - return "non-empty string without ISO control characters"; - } - } - - */ } diff --git a/src/main/java/com/michelin/ns4kafka/validation/TopicValidator.java b/src/main/java/com/michelin/ns4kafka/validation/TopicValidator.java index 330a5849..aeea3bd4 100644 --- a/src/main/java/com/michelin/ns4kafka/validation/TopicValidator.java +++ b/src/main/java/com/michelin/ns4kafka/validation/TopicValidator.java @@ -1,18 +1,23 @@ package com.michelin.ns4kafka.validation; -import com.michelin.ns4kafka.models.Topic; -import lombok.*; -import lombok.experimental.SuperBuilder; +import static com.michelin.ns4kafka.utils.config.TopicConfig.PARTITIONS; +import static com.michelin.ns4kafka.utils.config.TopicConfig.REPLICATION_FACTOR; +import com.michelin.ns4kafka.models.Topic; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.Setter; +import lombok.experimental.SuperBuilder; -import static com.michelin.ns4kafka.utils.config.TopicConfig.PARTITIONS; -import static com.michelin.ns4kafka.utils.config.TopicConfig.REPLICATION_FACTOR; - +/** + * Topic validator. + */ @Getter @Setter @SuperBuilder @@ -20,38 +25,84 @@ @EqualsAndHashCode(callSuper = true) public class TopicValidator extends ResourceValidator { /** - * Validate a given topic + * Build a default topic validator. 
+ * + * @return The topic validator + */ + public static TopicValidator makeDefault() { + return TopicValidator.builder() + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(3, 3), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + "retention.ms", ResourceValidator.Range.between(60000, 604800000), + "retention.bytes", ResourceValidator.Range.optionalBetween(-1, 104857600), + "preallocate", ResourceValidator.ValidString.optionalIn("true", "false") + ) + ) + .build(); + } + + /** + * Build a default topic validator for one broker. + * + * @return The topic validator + */ + public static TopicValidator makeDefaultOneBroker() { + return TopicValidator.builder() + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(1, 1), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(1, 1), + "retention.ms", ResourceValidator.Range.between(60000, 604800000), + "retention.bytes", ResourceValidator.Range.optionalBetween(-1, 104857600), + "preallocate", ResourceValidator.ValidString.optionalIn("true", "false") + ) + ) + .build(); + } + + /** + * Validate a given topic. + * + * @param topic The topic * @return A list of validation errors - * @see https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L36 + * @see <a href="https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L36">GitHub</a> */ public List<String> validate(Topic topic) { List<String> validationErrors = new ArrayList<>(); if (topic.getMetadata().getName().isEmpty()) { - validationErrors.add("Invalid value " + topic.getMetadata().getName() + " for name: Value must not be empty"); + validationErrors.add( + "Invalid value " + topic.getMetadata().getName() + " for name: Value must not be empty"); } if (topic.getMetadata().getName().equals(".") || topic.getMetadata().getName().equals("..")) { - validationErrors.add("Invalid value " + topic.getMetadata().getName() + " for name: Value must not be \".\" or \"..\""); + validationErrors.add( + "Invalid value " + topic.getMetadata().getName() + " for name: Value must not be \".\" or \"..\""); } if (topic.getMetadata().getName().length() > 249) { - validationErrors.add("Invalid value " + topic.getMetadata().getName() + " for name: Value must not be longer than 249"); + validationErrors.add( + "Invalid value " + topic.getMetadata().getName() + " for name: Value must not be longer than 249"); } if (!topic.getMetadata().getName().matches("[a-zA-Z0-9._-]+")) { - validationErrors.add("Invalid value " + topic.getMetadata().getName() + " for name: Value must only contain " + - "ASCII alphanumerics, '.', '_' or '-'"); + validationErrors.add( + "Invalid value " + topic.getMetadata().getName() + " for name: Value must only contain " + + "ASCII alphanumerics, '.', '_' or '-'"); } if (!validationConstraints.isEmpty() && topic.getSpec().getConfigs() != null) { Set<String> configsWithoutConstraints = topic.getSpec().getConfigs().keySet() - .stream() - .filter(s -> !validationConstraints.containsKey(s)) - .collect(Collectors.toSet()); + .stream() + .filter(s -> !validationConstraints.containsKey(s)) + .collect(Collectors.toSet()); if (!configsWithoutConstraints.isEmpty()) { - validationErrors.add("Configurations [" + String.join(",", configsWithoutConstraints) + "] are not allowed"); + validationErrors.add( +
"Configurations [" + String.join(",", configsWithoutConstraints) + "] are not allowed"); } } @@ -65,7 +116,8 @@ public List validate(Topic topic) { if (topic.getSpec().getConfigs() != null) { value.ensureValid(key, topic.getSpec().getConfigs().get(key)); } else { - validationErrors.add("Invalid value null for configuration " + key + ": Value must be non-null"); + validationErrors.add( + "Invalid value null for configuration " + key + ": Value must be non-null"); } } } catch (FieldValidationException e) { @@ -75,33 +127,4 @@ public List validate(Topic topic) { return validationErrors; } - public static TopicValidator makeDefault() { - return TopicValidator.builder() - .validationConstraints( - Map.of( "replication.factor", ResourceValidator.Range.between(3,3), - "partitions", ResourceValidator.Range.between(3,6), - "cleanup.policy", ResourceValidator.ValidList.in("delete","compact"), - "min.insync.replicas", ResourceValidator.Range.between(2,2), - "retention.ms", ResourceValidator.Range.between(60000,604800000), - "retention.bytes", ResourceValidator.Range.optionalBetween(-1, 104857600), - "preallocate", ResourceValidator.ValidString.optionalIn("true", "false") - ) - ) - .build(); - } - public static TopicValidator makeDefaultOneBroker(){ - return TopicValidator.builder() - .validationConstraints( - Map.of( "replication.factor", ResourceValidator.Range.between(1,1), - "partitions", ResourceValidator.Range.between(3,6), - "cleanup.policy", ResourceValidator.ValidList.in("delete","compact"), - "min.insync.replicas", ResourceValidator.Range.between(1,1), - "retention.ms", ResourceValidator.Range.between(60000,604800000), - "retention.bytes", ResourceValidator.Range.optionalBetween(-1, 104857600), - "preallocate", ResourceValidator.ValidString.optionalIn("true", "false") - ) - ) - .build(); - } - } diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml index bf06d7df..708768b2 100644 --- a/src/main/resources/application.yml +++ b/src/main/resources/application.yml @@ -122,12 +122,12 @@ ns4kafka: security: aes256-encryption-key: changeitchangeitchangeitchangeit admin-group: _ -# local-users: # Not for production use. -# - username: admin -# # SHA-256 password. -# password: 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918 -# groups: -# - "admin" + # local-users: # Not for production use. + # - username: admin + # # SHA-256 password. 
+ # password: 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918 + # groups: + # - "admin" store: kafka: enabled: true @@ -165,4 +165,4 @@ ns4kafka: routes: enabled: false threaddump: - enabled: false + enabled: false \ No newline at end of file diff --git a/src/test/java/com/michelin/ns4kafka/controllers/AclControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/AclControllerTest.java index 47bdac63..776a3cdd 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/AclControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/AclControllerTest.java @@ -1,5 +1,15 @@ package com.michelin.ns4kafka.controllers; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertLinesMatch; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.controllers.acl.AclController; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.AuditLog; @@ -14,6 +24,9 @@ import io.micronaut.http.HttpStatus; import io.micronaut.security.authentication.Authentication; import io.micronaut.security.utils.SecurityService; +import java.util.List; +import java.util.Map; +import java.util.Optional; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -22,14 +35,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; - @ExtendWith(MockitoExtension.class) class AclControllerTest { @Mock @@ -49,89 +54,60 @@ class AclControllerTest { @Test void list() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); // granted by admin to test - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) + AccessControlEntry ace1 = + AccessControlEntry.builder().metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) .build(); // granted by admin to test - AccessControlEntry ace2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) + AccessControlEntry ace2 = + AccessControlEntry.builder().metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) .spec(AccessControlEntry.AccessControlEntrySpec.builder() - 
.resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) .build(); // granted by test to namespace-other - AccessControlEntry ace3 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("prefix") - .grantedTo("namespace-other") - .build() - ) - .build(); + AccessControlEntry ace3 = + AccessControlEntry.builder().metadata(ObjectMeta.builder().namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("prefix").grantedTo("namespace-other") + .build()).build(); // granted by admin to namespace-other - AccessControlEntry ace4 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) + AccessControlEntry ace4 = + AccessControlEntry.builder().metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("other-prefix") - .grantedTo("namespace-other") - .build() - ) - .build(); + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("other-prefix") + .grantedTo("namespace-other").build()).build(); // granted by namespace-other to test AccessControlEntry ace5 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace-other").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("other-prefix") - .grantedTo("test") - .build() - ) - .build(); + .metadata(ObjectMeta.builder().namespace("namespace-other").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("other-prefix").grantedTo("test").build()) + .build(); // granted by admin to all (public) - AccessControlEntry ace6 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) + AccessControlEntry ace6 = + AccessControlEntry.builder().metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) .spec(AccessControlEntry.AccessControlEntrySpec.builder() - 
.resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("public-prefix") - .grantedTo("*") - .build() - ) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("public-prefix").grantedTo("*").build()) .build(); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of(ace1, ace2, ace5, ace6)); - when(accessControlEntryService.findAllForCluster("local")) - .thenReturn(List.of(ace1, ace2, ace3, ace4, ace5, ace6)); - - List actual = accessControlListController.list("test", Optional.of(AclController.AclLimit.GRANTEE)); + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.findAllGrantedToNamespace(ns)).thenReturn(List.of(ace1, ace2, ace5, ace6)); + when(accessControlEntryService.findAllForCluster("local")).thenReturn( + List.of(ace1, ace2, ace3, ace4, ace5, ace6)); + + List actual = + accessControlListController.list("test", Optional.of(AclController.AclLimit.GRANTEE)); assertEquals(4, actual.size()); assertTrue(actual.contains(ace1)); assertTrue(actual.contains(ace2)); @@ -153,149 +129,105 @@ void list() { } @Test - void getAcl(){ - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + void getAcl() { + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); // granted by tes to test AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); + .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) + .build(); // granted by test to test AccessControlEntry ace2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace2").namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); + .metadata(ObjectMeta.builder().name("ace2").namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) + .build(); // granted by test to namespace-other AccessControlEntry ace3 = AccessControlEntry.builder() - 
.metadata(ObjectMeta.builder().name("ace3").namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("prefix") - .grantedTo("namespace-other") - .build() - ) - .build(); + .metadata(ObjectMeta.builder().name("ace3").namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("prefix").grantedTo("namespace-other") + .build()).build(); // granted by admin to namespace-other AccessControlEntry ace4 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace4").namespace("admin").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("other-prefix") - .grantedTo("namespace-other") - .build() - ) - .build(); + .metadata(ObjectMeta.builder().name("ace4").namespace("admin").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("other-prefix") + .grantedTo("namespace-other").build()).build(); // granted by namespace-other to test AccessControlEntry ace5 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace5").namespace("namespace-other").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("other-prefix") - .grantedTo("test") - .build() - ) - .build(); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.findAllForCluster("local")) - .thenReturn(List.of(ace1, ace2, ace3, ace4, ace5)); - - // name not in list + .metadata(ObjectMeta.builder().name("ace5").namespace("namespace-other").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("other-prefix").grantedTo("test").build()) + .build(); + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.findAllForCluster("local")).thenReturn(List.of(ace1, ace2, ace3, ace4, ace5)); + + // Name not in list Optional result1 = accessControlListController.get("test", "ace6"); - // not granted to or assigned by me + assertTrue(result1.isEmpty()); + + // Not granted to or assigned by me Optional result2 = accessControlListController.get("test", "ace4"); - // assigned by me - Optional result3 = accessControlListController.get("test", "ace3"); - // granted to me - Optional result4 = accessControlListController.get("test", "ace5"); - assertTrue(result1.isEmpty()); assertTrue(result2.isEmpty()); + // Assigned by me + 
Optional<AccessControlEntry> result3 = accessControlListController.get("test", "ace3"); + assertTrue(result3.isPresent()); assertEquals(ace3, result3.get()); + // Granted to me + Optional<AccessControlEntry> result4 = accessControlListController.get("test", "ace5"); + assertTrue(result4.isPresent()); assertEquals(ace5, result4.get()); - } @Test - void applyAsAdmin_Failure() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() + void applyAsAdminFailure() { + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); + AccessControlEntry ace1 = + AccessControlEntry.builder().metadata(ObjectMeta.builder().namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder() .resourceType(AccessControlEntry.ResourceType.TOPIC) .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) .build(); - Authentication auth = Authentication.build("admin", Map.of("roles",List.of("isAdmin()"))); + Authentication auth = Authentication.build("admin", Map.of("roles", List.of("isAdmin()"))); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validateAsAdmin(ace1, ns)) - .thenReturn(List.of("ValidationError")); + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validateAsAdmin(ace1, ns)).thenReturn(List.of("ValidationError")); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> accessControlListController.apply(auth,"test", ace1, false)); + () -> accessControlListController.apply(auth, "test", ace1, false)); assertEquals(1, actual.getValidationErrors().size()); } @Test - void applyAsAdmin_Success() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("acl-test").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - Authentication auth = Authentication.build("admin", Map.of("roles",List.of("isAdmin()"))); - - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validateAsAdmin(ace1, ns)) - .thenReturn(List.of()); - when(accessControlEntryService.create(ace1)) - .thenReturn(ace1); + void applyAsAdminSuccess() { + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); + AccessControlEntry ace1 = AccessControlEntry.builder().metadata(ObjectMeta.builder().name("acl-test").build()) + .spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + 
.permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) + .build(); + Authentication auth = Authentication.build("admin", Map.of("roles", List.of("isAdmin()"))); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validateAsAdmin(ace1, ns)).thenReturn(List.of()); + when(accessControlEntryService.create(ace1)).thenReturn(ace1); - var response = accessControlListController.apply(auth,"test", ace1, false); + var response = accessControlListController.apply(auth, "test", ace1, false); AccessControlEntry actual = response.body(); assertEquals("created", response.header("X-Ns4kafka-Result")); assertEquals("test", actual.getMetadata().getNamespace()); @@ -304,59 +236,39 @@ void applyAsAdmin_Success() { @Test void applyValidationErrors() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); + AccessControlEntry ace1 = + AccessControlEntry.builder().metadata(ObjectMeta.builder().namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder() .resourceType(AccessControlEntry.ResourceType.TOPIC) .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validate(ace1, ns)) - .thenReturn(List.of("ValidationError")); + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validate(ace1, ns)).thenReturn(List.of("ValidationError")); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> accessControlListController.apply(auth,"test", ace1, false)); + () -> accessControlListController.apply(auth, "test", ace1, false)); assertEquals(1, actual.getValidationErrors().size()); } @Test void applySuccess() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); - - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validate(ace1, ns)) - .thenReturn(List.of()); + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); + AccessControlEntry ace1 = 
AccessControlEntry.builder().metadata(ObjectMeta.builder().build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()).build(); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validate(ace1, ns)).thenReturn(List.of()); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); - when(accessControlEntryService.create(ace1)) - .thenReturn(ace1); + when(accessControlEntryService.create(ace1)).thenReturn(ace1); var response = accessControlListController.apply(auth, "test", ace1, false); AccessControlEntry actual = response.body(); @@ -364,122 +276,76 @@ void applySuccess() { assertEquals("test", actual.getMetadata().getNamespace()); assertEquals("local", actual.getMetadata().getCluster()); } - @Test - void applySuccess_AlreadyExists() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validate(ace1, ns)) - .thenReturn(List.of()); - when(accessControlEntryService.findByName("test","ace1")) - .thenReturn(Optional.of(ace1)); + @Test + void applySuccessAlreadyExists() { + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); + AccessControlEntry ace1 = AccessControlEntry.builder().metadata(ObjectMeta.builder().name("ace1").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()).build(); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validate(ace1, ns)).thenReturn(List.of()); + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.of(ace1)); var response = accessControlListController.apply(auth, "test", ace1, false); AccessControlEntry actual = response.body(); assertEquals("unchanged", response.header("X-Ns4kafka-Result")); assertEquals("test", actual.getMetadata().getNamespace()); assertEquals("local", actual.getMetadata().getCluster()); - verify(accessControlEntryService,never()).create(ArgumentMatchers.any()); + verify(accessControlEntryService, never()).create(ArgumentMatchers.any()); } @Test - void applyFailed_ChangedSpec() { - Namespace ns = 
Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - AccessControlEntry ace1Old = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) //This line was changed - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); - - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validate(ace1, ns)) - .thenReturn(List.of()); - when(accessControlEntryService.findByName("test","ace1")) - .thenReturn(Optional.of(ace1Old)); + void applyFailedChangedSpec() { + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); + AccessControlEntry ace1 = AccessControlEntry.builder().metadata(ObjectMeta.builder().name("ace1").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()).build(); + AccessControlEntry ace1Old = AccessControlEntry.builder().metadata(ObjectMeta.builder().name("ace1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) //This line was changed + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()).build(); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validate(ace1, ns)).thenReturn(List.of()); + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.of(ace1Old)); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> accessControlListController.apply(auth,"test", ace1, false)); + () -> accessControlListController.apply(auth, "test", ace1, false)); assertEquals(1, actual.getValidationErrors().size()); - assertEquals("Invalid modification: `spec` is immutable. You can still update `metadata`", actual.getValidationErrors().get(0)); + assertEquals("Invalid modification: `spec` is immutable. 
You can still update `metadata`", + actual.getValidationErrors().get(0)); } @Test - void applySuccess_ChangedMetadata() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + void applySuccessChangedMetadata() { + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ace1") - .labels(Map.of("new-label", "label-value")) // This label is new - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - AccessControlEntry ace1Old = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); - - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validate(ace1, ns)) - .thenReturn(List.of()); - when(accessControlEntryService.findByName("test","ace1")) - .thenReturn(Optional.of(ace1Old)); - when(accessControlEntryService.create(ace1)) - .thenReturn(ace1); + .metadata(ObjectMeta.builder().name("ace1").labels(Map.of("new-label", "label-value")) // This label is new + .build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) + .build(); + AccessControlEntry ace1Old = AccessControlEntry.builder().metadata(ObjectMeta.builder().name("ace1").build()) + .spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) + .build(); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validate(ace1, ns)).thenReturn(List.of()); + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.of(ace1Old)); + when(accessControlEntryService.create(ace1)).thenReturn(ace1); var response = accessControlListController.apply(auth, "test", ace1, false); AccessControlEntry actual = response.body(); @@ -491,43 +357,26 @@ void applySuccess_ChangedMetadata() { } @Test - void applySuccess_ChangedMetadataDryRun() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + void applySuccessChangedMetadataDryRun() { + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ace1") - 
.labels(Map.of("new-label", "label-value")) // This label is new - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - AccessControlEntry ace1Old = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) - .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); - - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validate(ace1, ns)) - .thenReturn(List.of()); - when(accessControlEntryService.findByName("test","ace1")) - .thenReturn(Optional.of(ace1Old)); + .metadata(ObjectMeta.builder().name("ace1").labels(Map.of("new-label", "label-value")) // This label is new + .build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) + .build(); + AccessControlEntry ace1Old = AccessControlEntry.builder().metadata(ObjectMeta.builder().name("ace1").build()) + .spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) + .build(); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validate(ace1, ns)).thenReturn(List.of()); + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.of(ace1Old)); var response = accessControlListController.apply(auth, "test", ace1, true); AccessControlEntry actual = response.body(); @@ -540,213 +389,139 @@ void applySuccess_ChangedMetadataDryRun() { @Test void applyDryRunAdmin() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); + AccessControlEntry ace1 = + AccessControlEntry.builder().metadata(ObjectMeta.builder().namespace("admin").cluster("local").build()) .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build() - ) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()) .build(); - 
Authentication auth = Authentication.build("admin", Map.of("roles",List.of("isAdmin()"))); + Authentication auth = Authentication.build("admin", Map.of("roles", List.of("isAdmin()"))); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validateAsAdmin(ace1, ns)) - .thenReturn(List.of()); + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validateAsAdmin(ace1, ns)).thenReturn(List.of()); var response = accessControlListController.apply(auth, "test", ace1, true); - AccessControlEntry actual = response.body(); + assertEquals("created", response.header("X-Ns4kafka-Result")); verify(accessControlEntryService, never()).create(ArgumentMatchers.any()); } @Test void applyDryRun() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); - AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("prefix") - .grantedTo("test") - .build()) - .build(); + AccessControlEntry ace1 = AccessControlEntry.builder().metadata(ObjectMeta.builder().build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER).resource("prefix").grantedTo("test").build()).build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.validate(ace1, ns)) - .thenReturn(List.of()); + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.validate(ace1, ns)).thenReturn(List.of()); var response = accessControlListController.apply(auth, "test", ace1, true); - AccessControlEntry actual = response.body(); + assertEquals("created", response.header("X-Ns4kafka-Result")); verify(accessControlEntryService, never()).create(ace1); } - /** - * Validate ACL deletion fail when not found - */ @Test void deleteFailNotFound() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.findByName("test", "ace1")) - .thenReturn(Optional.empty()); + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.empty()); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> accessControlListController.delete(auth,"test", "ace1", false)); + () -> accessControlListController.delete(auth, "test", "ace1", false)); - assertLinesMatch(List.of("Invalid 
value ace1 for name: ACL does not exist in this namespace."), actual.getValidationErrors()); + assertLinesMatch(List.of("Invalid value ace1 for name: ACL does not exist in this namespace."), + actual.getValidationErrors()); } - /** - * Validate ACL deletion failed for self assigned - */ @Test void deleteFailSelfAssigned() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("prefix") - .grantedTo("test") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("prefix").grantedTo("test").build()) + .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.findByName("test", "ace1")) - .thenReturn(Optional.of(ace1)); + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.of(ace1)); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> accessControlListController.delete(auth,"test", "ace1", false)); + () -> accessControlListController.delete(auth, "test", "ace1", false)); - assertLinesMatch( - List.of("Only admins.*"), - actual.getValidationErrors()); + assertLinesMatch(List.of("Only admins.*"), actual.getValidationErrors()); } - /** - * Validate ACL deletion as admin - */ @Test void deleteSuccessSelfAssigned_AsAdmin() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("prefix") - .grantedTo("test") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("prefix").grantedTo("test").build()) + .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of("isAdmin()"))); + Authentication auth = Authentication.build("user", Map.of("roles", 
List.of("isAdmin()"))); - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.findByName("test", "ace1")) - .thenReturn(Optional.of(ace1)); + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.of(ace1)); - HttpResponse actual = accessControlListController.delete(auth,"test", "ace1", false); + HttpResponse actual = accessControlListController.delete(auth, "test", "ace1", false); assertEquals(HttpStatus.NO_CONTENT, actual.status()); } - /** - * Validate ACL deletion - */ @Test void deleteSuccess() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("prefix") - .grantedTo("namespace-other") - .build()) - .build(); - Authentication auth = Authentication.build("user", Map.of("roles",List.of())); - - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.findByName("test", "ace1")) - .thenReturn(Optional.of(ace1)); + .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("prefix").grantedTo("namespace-other") + .build()).build(); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); + + when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.of(ace1)); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); - HttpResponse actual = accessControlListController.delete(auth,"test", "ace1", false); + HttpResponse actual = accessControlListController.delete(auth, "test", "ace1", false); assertEquals(HttpStatus.NO_CONTENT, actual.status()); } - /** - * Validate ACL deletion in dry mode - */ @Test void deleteDryRun() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("test").cluster("local").build()) - .build(); + Namespace ns = Namespace.builder().metadata(ObjectMeta.builder().name("test").cluster("local").build()).build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("prefix") - .grantedTo("namespace-other") - .build() - ) - .build(); - Authentication auth = Authentication.build("user", 
Map.of("roles",List.of())); - - when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(accessControlEntryService.findByName("test", "ace1")) - .thenReturn(Optional.of(ace1)); - HttpResponse actual = accessControlListController.delete(auth,"test", "ace1", true); + .metadata(ObjectMeta.builder().name("ace1").namespace("test").cluster("local").build()).spec( + AccessControlEntry.AccessControlEntrySpec.builder().resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ).resource("prefix").grantedTo("namespace-other") + .build()).build(); + Authentication auth = Authentication.build("user", Map.of("roles", List.of())); + + when(accessControlEntryService.findByName("test", "ace1")).thenReturn(Optional.of(ace1)); + HttpResponse actual = accessControlListController.delete(auth, "test", "ace1", true); verify(accessControlEntryService, never()).delete(any(), any()); assertEquals(HttpStatus.NO_CONTENT, actual.status()); diff --git a/src/test/java/com/michelin/ns4kafka/controllers/AclNonNamespacedControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/AclNonNamespacedControllerTest.java index 0f2cb544..e869fa1d 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/AclNonNamespacedControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/AclNonNamespacedControllerTest.java @@ -1,42 +1,35 @@ package com.michelin.ns4kafka.controllers; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.controllers.acl.AclNonNamespacedController; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.services.AccessControlEntryService; +import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class AclNonNamespacedControllerTest { - /** - * The mocked ACL service - */ @Mock AccessControlEntryService accessControlEntryService; - /** - * The mocked ACL controller - */ @InjectMocks AclNonNamespacedController aclNonNamespacedController; @Test void listAll() { AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); + .metadata(ObjectMeta.builder().namespace("namespace1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); AccessControlEntry ace2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace2").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); + .metadata(ObjectMeta.builder().namespace("namespace2").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); when(accessControlEntryService.findAll()).thenReturn(List.of(ace1, ace2)); diff --git a/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerTest.java 
b/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerTest.java index e6bba8c1..1317465c 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerTest.java @@ -1,27 +1,27 @@ package com.michelin.ns4kafka.controllers; -import com.michelin.ns4kafka.config.AkhqClaimProviderControllerConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertLinesMatch; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.properties.AkhqProperties; import com.michelin.ns4kafka.services.AccessControlEntryService; import com.michelin.ns4kafka.services.NamespaceService; +import java.util.List; +import java.util.Map; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Map; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertLinesMatch; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class AkhqClaimProviderControllerTest { @Mock @@ -34,304 +34,307 @@ class AkhqClaimProviderControllerTest { AkhqClaimProviderController akhqClaimProviderController; @Spy - AkhqClaimProviderControllerConfig akhqClaimProviderControllerConfig = getAkhqClaimProviderControllerConfig(); + AkhqProperties akhqProperties = getAkhqClaimProviderControllerConfig(); - private AkhqClaimProviderControllerConfig getAkhqClaimProviderControllerConfig() { - AkhqClaimProviderControllerConfig config = new AkhqClaimProviderControllerConfig(); + private AkhqProperties getAkhqClaimProviderControllerConfig() { + AkhqProperties config = new AkhqProperties(); config.setGroupLabel("support-group"); config.setFormerRoles(List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" )); config.setAdminGroup("GP-ADMIN"); config.setFormerAdminRoles(List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" )); return config; } @Test - void computeAllowedRegexListTestEmpty(){ - List<AccessControlEntry> inputACLs = List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project2.topic1") - .build()) - .build() + void computeAllowedRegexListTestEmpty() { + List<AccessControlEntry> inputAcls = 
List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project2.topic1") + .build()) + .build() ); - List<String> actual = akhqClaimProviderController.computeAllowedRegexListForResourceType(inputACLs, AccessControlEntry.ResourceType.CONNECT); + List<String> actual = akhqClaimProviderController.computeAllowedRegexListForResourceType(inputAcls, + AccessControlEntry.ResourceType.CONNECT); assertEquals(1, actual.size()); assertEquals("^none$", actual.get(0)); } @Test - void computeAllowedRegexListTestSuccess(){ - List<AccessControlEntry> inputACLs = List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project2.topic1") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.connects") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.GROUP) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build() + void computeAllowedRegexListTestSuccess() { + List<AccessControlEntry> inputAcls = List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project2.topic1") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.connects") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.GROUP) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build() ); - List<String> actual = akhqClaimProviderController.computeAllowedRegexListForResourceType(inputACLs, AccessControlEntry.ResourceType.TOPIC); + List<String> actual = akhqClaimProviderController.computeAllowedRegexListForResourceType(inputAcls, + AccessControlEntry.ResourceType.TOPIC); assertEquals(2, actual.size()); assertLinesMatch( - List.of( - 
"^\\Qproject1.\\E.*$", - "^\\Qproject2.topic1\\E$" - ), - actual + List.of( + "^\\Qproject1.\\E.*$", + "^\\Qproject2.topic1\\E$" + ), + actual ); Assertions.assertFalse(actual.contains("^\\Qproject1.connects\\E.*$")); } @Test - void computeAllowedRegexListTestSuccessDistinct(){ - List inputACLs = List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build() + void computeAllowedRegexListTestSuccessDistinct() { + List inputAcls = List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build() ); - List actual = akhqClaimProviderController.computeAllowedRegexListForResourceType(inputACLs, AccessControlEntry.ResourceType.TOPIC); + List actual = akhqClaimProviderController.computeAllowedRegexListForResourceType(inputAcls, + AccessControlEntry.ResourceType.TOPIC); assertEquals(1, actual.size()); assertLinesMatch( - List.of( - "^\\Qproject1.\\E.*$" - ), - actual + List.of( + "^\\Qproject1.\\E.*$" + ), + actual ); } @Test - void generateClaimTestNullOrEmptyRequest(){ - AkhqClaimProviderController.AKHQClaimResponse actual = akhqClaimProviderController.generateClaim(null); + void generateClaimTestNullOrEmptyRequest() { + AkhqClaimProviderController.AkhqClaimResponse actual = akhqClaimProviderController.generateClaim(null); assertEquals(1, actual.getAttributes().get("topicsFilterRegexp").size()); assertEquals("^none$", actual.getAttributes().get("topicsFilterRegexp").get(0)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder().build(); + AkhqClaimProviderController.AkhqClaimRequest request = + AkhqClaimProviderController.AkhqClaimRequest.builder().build(); actual = akhqClaimProviderController.generateClaim(request); assertEquals(1, actual.getAttributes().get("topicsFilterRegexp").size()); assertEquals("^none$", actual.getAttributes().get("topicsFilterRegexp").get(0)); - request = AkhqClaimProviderController.AKHQClaimRequest.builder().groups(List.of()).build(); + request = AkhqClaimProviderController.AkhqClaimRequest.builder().groups(List.of()).build(); actual = akhqClaimProviderController.generateClaim(request); assertEquals(1, actual.getAttributes().get("topicsFilterRegexp").size()); assertEquals("^none$", actual.getAttributes().get("topicsFilterRegexp").get(0)); assertLinesMatch( - List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" - ), - actual.getRoles() + List.of( + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" + ), + actual.getRoles() ); } @Test - void 
generateClaimTestSuccess(){ + void generateClaimTestSuccess() { Namespace ns1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1") - .labels(Map.of("support-group","GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1") + .labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); Namespace ns2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns2") - .labels(Map.of("support-group","GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns2") + .labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); Namespace ns3 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns3") - .labels(Map.of("support-group","GP-PROJECT2-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns3") + .labels(Map.of("support-group", "GP-PROJECT2-SUPPORT")) + .build()) + .build(); Namespace ns4 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns4") - .labels(Map.of("other-key","anything")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns4") + .labels(Map.of("other-key", "anything")) + .build()) + .build(); Namespace ns5 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns5") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns5") + .build()) + .build(); AccessControlEntry ns1Ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + .build()) + .build(); AccessControlEntry ns1Ace2 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_c.") - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_c.") + .build()) + .build(); AccessControlEntry ns2Ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project2_t.") - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project2_t.") + .build()) + .build(); AccessControlEntry ns2Ace2 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") // ACL granted by ns1 to ns2 - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") // ACL granted 
by ns1 to ns2 + .build()) + .build(); AccessControlEntry ns3Ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project3_topic") - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project3_topic") + .build()) + .build(); AccessControlEntry pubAce1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("public_t.") - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("public_t.") + .build()) + .build(); when(namespaceService.listAll()) - .thenReturn(List.of(ns1, ns2, ns3, ns4, ns5)); + .thenReturn(List.of(ns1, ns2, ns3, ns4, ns5)); when(accessControlEntryService.findAllGrantedToNamespace(ns1)) - .thenReturn(List.of(ns1Ace1, ns1Ace2, pubAce1)); + .thenReturn(List.of(ns1Ace1, ns1Ace2, pubAce1)); when(accessControlEntryService.findAllGrantedToNamespace(ns2)) - .thenReturn(List.of(ns2Ace1, ns2Ace2, pubAce1)); + .thenReturn(List.of(ns2Ace1, ns2Ace2, pubAce1)); when(accessControlEntryService.findAllGrantedToNamespace(ns3)) - .thenReturn(List.of(ns3Ace1, pubAce1)); + .thenReturn(List.of(ns3Ace1, pubAce1)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1-SUPPORT", "GP-PROJECT2-SUPPORT")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1-SUPPORT", "GP-PROJECT2-SUPPORT")) + .build(); - AkhqClaimProviderController.AKHQClaimResponse actual = akhqClaimProviderController.generateClaim(request); + AkhqClaimProviderController.AkhqClaimResponse actual = akhqClaimProviderController.generateClaim(request); - Mockito.verify(accessControlEntryService,Mockito.times(1)).findAllGrantedToNamespace(ns1); - Mockito.verify(accessControlEntryService,Mockito.times(1)).findAllGrantedToNamespace(ns2); - Mockito.verify(accessControlEntryService,Mockito.times(1)).findAllGrantedToNamespace(ns3); - Mockito.verify(accessControlEntryService,Mockito.never()).findAllGrantedToNamespace(ns4); - Mockito.verify(accessControlEntryService,Mockito.never()).findAllGrantedToNamespace(ns5); - Mockito.verify(accessControlEntryService,Mockito.times(1)).findAllPublicGrantedTo(); assertLinesMatch( - List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" - ), - actual.getRoles() - ); - + List.of( + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" + ), actual.getRoles()); + assertEquals(4, actual.getAttributes().get("topicsFilterRegexp").size()); assertLinesMatch( - List.of( - "^\\Qproject1_t.\\E.*$", - "^\\Qpublic_t.\\E.*$", - "^\\Qproject2_t.\\E.*$", - "^\\Qproject3_topic\\E$" - ), - actual.getAttributes().get("topicsFilterRegexp") + List.of( + "^\\Qproject1_t.\\E.*$", + "^\\Qpublic_t.\\E.*$", + "^\\Qproject2_t.\\E.*$", + 
"^\\Qproject3_topic\\E$" + ), + actual.getAttributes().get("topicsFilterRegexp") ); + verify(accessControlEntryService).findAllGrantedToNamespace(ns1); + verify(accessControlEntryService).findAllGrantedToNamespace(ns2); + verify(accessControlEntryService).findAllGrantedToNamespace(ns3); + verify(accessControlEntryService, never()).findAllGrantedToNamespace(ns4); + verify(accessControlEntryService, never()).findAllGrantedToNamespace(ns5); + verify(accessControlEntryService).findAllPublicGrantedTo(); } + @Test void generateClaimTestSuccessAdmin() { - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-ADMIN")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-ADMIN")) + .build(); - AkhqClaimProviderController.AKHQClaimResponse actual = akhqClaimProviderController.generateClaim(request); + AkhqClaimProviderController.AkhqClaimResponse actual = akhqClaimProviderController.generateClaim(request); assertLinesMatch( - List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" - ), - actual.getRoles() + List.of( + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" + ), + actual.getRoles() ); // Admin Regexp assertLinesMatch(List.of(".*$"), actual.getAttributes().get("topicsFilterRegexp")); @@ -340,174 +343,173 @@ void generateClaimTestSuccessAdmin() { } @Test - void generateClaimV2TestSuccess(){ + void generateClaimV2TestSuccess() { Namespace ns1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1") - .labels(Map.of("support-group","GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1") + .labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); Namespace ns2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns2") - .labels(Map.of("support-group","GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns2") + .labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); Namespace ns3 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns3") - .labels(Map.of("support-group","GP-PROJECT2-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns3") + .labels(Map.of("support-group", "GP-PROJECT2-SUPPORT")) + .build()) + .build(); Namespace ns4 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns4") - .labels(Map.of("other-key","anything")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns4") + .labels(Map.of("other-key", "anything")) + .build()) + .build(); Namespace ns5 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns5") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns5") + .build()) + .build(); - AccessControlEntry ns1_ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); - AccessControlEntry ns1_ace2 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - 
.resource("project1_c.") - .build()) - .build(); - AccessControlEntry ns2_ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project2_t.") - .build()) - .build(); - AccessControlEntry ns2_ace2 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") // ACL granted by ns1 to ns2 - .build()) - .build(); - AccessControlEntry ns3_ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project3_topic") - .build()) - .build(); - AccessControlEntry pub_ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("public_t.") - .build()) - .build(); + AccessControlEntry ns1Ace1 = AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + .build()) + .build(); + AccessControlEntry ns1Ace2 = AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_c.") + .build()) + .build(); + AccessControlEntry ns2Ace1 = AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project2_t.") + .build()) + .build(); + AccessControlEntry ns2Ace2 = AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") // ACL granted by ns1 to ns2 + .build()) + .build(); + AccessControlEntry ns3Ace1 = AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project3_topic") + .build()) + .build(); + AccessControlEntry pubAce1 = AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("public_t.") + .build()) + .build(); when(namespaceService.listAll()) - .thenReturn(List.of(ns1, ns2, ns3, ns4, ns5)); + .thenReturn(List.of(ns1, ns2, ns3, ns4, ns5)); when(accessControlEntryService.findAllGrantedToNamespace(ns1)) - .thenReturn(List.of(ns1_ace1, ns1_ace2, pub_ace1)); + .thenReturn(List.of(ns1Ace1, ns1Ace2, pubAce1)); when(accessControlEntryService.findAllGrantedToNamespace(ns2)) - .thenReturn(List.of(ns2_ace1, ns2_ace2, pub_ace1)); + .thenReturn(List.of(ns2Ace1, ns2Ace2, 
pubAce1)); when(accessControlEntryService.findAllGrantedToNamespace(ns3)) - .thenReturn(List.of(ns3_ace1, pub_ace1)); + .thenReturn(List.of(ns3Ace1, pubAce1)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1-SUPPORT", "GP-PROJECT2-SUPPORT")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1-SUPPORT", "GP-PROJECT2-SUPPORT")) + .build(); - AkhqClaimProviderController.AKHQClaimResponseV2 actual = akhqClaimProviderController.generateClaimV2(request); + AkhqClaimProviderController.AkhqClaimResponseV2 actual = akhqClaimProviderController.generateClaimV2(request); - Mockito.verify(accessControlEntryService,Mockito.times(1)).findAllGrantedToNamespace(ns1); - Mockito.verify(accessControlEntryService,Mockito.times(1)).findAllGrantedToNamespace(ns2); - Mockito.verify(accessControlEntryService,Mockito.times(1)).findAllGrantedToNamespace(ns3); - Mockito.verify(accessControlEntryService,Mockito.never()).findAllGrantedToNamespace(ns4); - Mockito.verify(accessControlEntryService,Mockito.never()).findAllGrantedToNamespace(ns5); - Mockito.verify(accessControlEntryService,Mockito.times(1)).findAllPublicGrantedTo(); assertLinesMatch( - List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" - ), - actual.getRoles() - ); + List.of( + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" + ), actual.getRoles()); assertEquals(4, actual.getTopicsFilterRegexp().size()); assertLinesMatch( - List.of( - "^\\Qproject1_t.\\E.*$", - "^\\Qpublic_t.\\E.*$", - "^\\Qproject2_t.\\E.*$", - "^\\Qproject3_topic\\E$" - ), - actual.getTopicsFilterRegexp() + List.of( + "^\\Qproject1_t.\\E.*$", + "^\\Qpublic_t.\\E.*$", + "^\\Qproject2_t.\\E.*$", + "^\\Qproject3_topic\\E$" + ), + actual.getTopicsFilterRegexp() ); + verify(accessControlEntryService).findAllGrantedToNamespace(ns1); + verify(accessControlEntryService).findAllGrantedToNamespace(ns2); + verify(accessControlEntryService).findAllGrantedToNamespace(ns3); + verify(accessControlEntryService, never()).findAllGrantedToNamespace(ns4); + verify(accessControlEntryService, never()).findAllGrantedToNamespace(ns5); + verify(accessControlEntryService).findAllPublicGrantedTo(); } @Test - void generateClaimV2TestNullOrEmptyRequest(){ - AkhqClaimProviderController.AKHQClaimResponseV2 actual = akhqClaimProviderController.generateClaimV2(null); + void generateClaimV2TestNullOrEmptyRequest() { + AkhqClaimProviderController.AkhqClaimResponseV2 actual = akhqClaimProviderController.generateClaimV2(null); assertEquals(1, actual.getTopicsFilterRegexp().size()); assertEquals("^none$", actual.getTopicsFilterRegexp().get(0)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder().build(); + AkhqClaimProviderController.AkhqClaimRequest request = + AkhqClaimProviderController.AkhqClaimRequest.builder().build(); actual = akhqClaimProviderController.generateClaimV2(request); assertEquals(1, actual.getTopicsFilterRegexp().size()); assertEquals("^none$", actual.getTopicsFilterRegexp().get(0)); - request = AkhqClaimProviderController.AKHQClaimRequest.builder().groups(List.of()).build(); + request = AkhqClaimProviderController.AkhqClaimRequest.builder().groups(List.of()).build(); actual = 
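// "^none$" is the deny-by-default sentinel asserted for all three cases of this test
// (null request, null groups, empty groups). A sketch of the guard being exercised,
// with illustrative names rather than the controller's actual fields:
//
//     if (request == null || request.getGroups() == null || request.getGroups().isEmpty()) {
//         return AkhqClaimResponseV2.builder()
//             .roles(defaultRoles)                       // roles are still returned
//             .topicsFilterRegexp(List.of("^none$"))     // topics pinned to the literal "none"
//             .build();
//     }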
akhqClaimProviderController.generateClaimV2(request); assertEquals(1, actual.getTopicsFilterRegexp().size()); assertEquals("^none$", actual.getTopicsFilterRegexp().get(0)); assertLinesMatch( - List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" - ), - actual.getRoles() + List.of( + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" + ), + actual.getRoles() ); } @Test void generateClaimV2TestSuccessAdmin() { - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-ADMIN")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-ADMIN")) + .build(); - AkhqClaimProviderController.AKHQClaimResponseV2 actual = akhqClaimProviderController.generateClaimV2(request); + AkhqClaimProviderController.AkhqClaimResponseV2 actual = akhqClaimProviderController.generateClaimV2(request); // AdminRoles assertLinesMatch( - List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" - ), - actual.getRoles() + List.of( + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" + ), + actual.getRoles() ); // Admin Regexp assertLinesMatch(List.of(".*$"), actual.getTopicsFilterRegexp()); @@ -516,69 +518,70 @@ void generateClaimV2TestSuccessAdmin() { } @Test - void computeAllowedRegexListTestSuccessFilterStartWith(){ - List inputACLs = List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project1.topic1") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project2.topic2") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project2.topic3") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project3.topic4") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project3.topic5") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project3.") - .build()) - .build() + 
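// In the test below, only four of the seven input ACLs survive: the two PREFIXED
// entries ("project1.", "project3.") and the two project2 LITERAL topics, because
// "project1.topic1", "project3.topic4" and "project3.topic5" are already covered by
// a PREFIXED entry of the same resource type.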
void computeAllowedRegexListTestSuccessFilterStartWith() { + List<AccessControlEntry> inputAcls = List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project1.topic1") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project2.topic2") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project2.topic3") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project3.topic4") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project3.topic5") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project3.") + .build()) + .build() ); - List<String> actual = akhqClaimProviderController.computeAllowedRegexListForResourceType(inputACLs, AccessControlEntry.ResourceType.TOPIC); + List<String> actual = akhqClaimProviderController.computeAllowedRegexListForResourceType(inputAcls, + AccessControlEntry.ResourceType.TOPIC); assertEquals(4, actual.size()); assertLinesMatch( - List.of( - "^\\Qproject1.\\E.*$", - "^\\Qproject2.topic2\\E$", - "^\\Qproject2.topic3\\E$", - "^\\Qproject3.\\E.*$" - ), - actual + List.of( + "^\\Qproject1.\\E.*$", + "^\\Qproject2.topic2\\E$", + "^\\Qproject2.topic3\\E$", + "^\\Qproject3.\\E.*$" + ), + actual ); } } diff --git a/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerV3Test.java b/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerV3Test.java index 8c65cca8..e25a516f 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerV3Test.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderControllerV3Test.java @@ -1,24 +1,24 @@ package com.michelin.ns4kafka.controllers; -import com.michelin.ns4kafka.config.AkhqClaimProviderControllerConfig; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.properties.AkhqProperties; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import 
com.michelin.ns4kafka.services.AccessControlEntryService; import com.michelin.ns4kafka.services.NamespaceService; +import java.util.List; +import java.util.Map; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Map; - @ExtendWith(MockitoExtension.class) class AkhqClaimProviderControllerV3Test { @Mock @@ -31,91 +31,94 @@ class AkhqClaimProviderControllerV3Test { AkhqClaimProviderController akhqClaimProviderController; @Spy - AkhqClaimProviderControllerConfig akhqClaimProviderControllerConfig = getAkhqClaimProviderControllerConfig(); + AkhqProperties akhqProperties = getAkhqClaimProviderControllerConfig(); - private AkhqClaimProviderControllerConfig getAkhqClaimProviderControllerConfig() { - AkhqClaimProviderControllerConfig config = new AkhqClaimProviderControllerConfig(); + private AkhqProperties getAkhqClaimProviderControllerConfig() { + AkhqProperties config = new AkhqProperties(); config.setGroupLabel("support-group"); config.setAdminGroup("GP-ADMIN"); config.setRoles(Map.of(AccessControlEntry.ResourceType.TOPIC, "topic-read", - AccessControlEntry.ResourceType.CONNECT, "connect-rw", - AccessControlEntry.ResourceType.SCHEMA, "registry-read")); + AccessControlEntry.ResourceType.CONNECT, "connect-rw", + AccessControlEntry.ResourceType.SCHEMA, "registry-read")); config.setAdminRoles(Map.of(AccessControlEntry.ResourceType.TOPIC, "topic-admin", - AccessControlEntry.ResourceType.CONNECT, "connect-admin", - AccessControlEntry.ResourceType.SCHEMA, "registry-admin")); + AccessControlEntry.ResourceType.CONNECT, "connect-admin", + AccessControlEntry.ResourceType.SCHEMA, "registry-admin")); return config; } @Test void generateClaimHappyPath() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder().name("ns1").cluster("cluster1") - .labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("ns1").cluster("cluster1") + .labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); AccessControlEntry ace1Ns1Cluster1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); - - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), new KafkaAsyncExecutorConfig("cluster2")); - Mockito.when(namespaceService.listAll()) - .thenReturn(List.of(ns1Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)) - .thenReturn(List.of(ace1Ns1Cluster1)); - - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1-SUPPORT")) - .build(); - - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + 
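// Shape of the V3 payload the assertions below walk, reconstructed from the getters
// they use (one binding per role, each carrying pattern and cluster regexes):
//
//     groups: { "group": [ { role: "topic-read",
//                            patterns: ["^\Qproject1_t.\E.*$"],
//                            clusters: ["^cluster1$"] },
//                          { role: "registry-read", ... } ] }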
.build()) + .build(); + + akhqClaimProviderController.managedClusters = + List.of(new ManagedClusterProperties("cluster1"), new ManagedClusterProperties("cluster2")); + when(namespaceService.listAll()) + .thenReturn(List.of(ns1Cluster1)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)) + .thenReturn(List.of(ace1Ns1Cluster1)); + + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1-SUPPORT")) + .build(); + + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); Assertions.assertEquals(actual.getGroups().size(), 1); - List groups = actual.getGroups().get("group"); + List groups = actual.getGroups().get("group"); Assertions.assertEquals(2, groups.size()); Assertions.assertEquals("topic-read", groups.get(0).getRole()); Assertions.assertEquals(List.of("^\\Qproject1_t.\\E.*$"), groups.get(0).getPatterns()); Assertions.assertEquals(List.of("^cluster1$"), groups.get(0).getClusters()); Assertions.assertEquals("registry-read", groups.get(1).getRole()); } + @Test void generateClaimMultipleSupportGroups() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder().name("ns1").cluster("cluster1") - .labels(Map.of("support-group", "GP-PROJECT1-DEV,GP-PROJECT1-SUPPORT,GP-PROJECT1-OPS")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("ns1").cluster("cluster1") + .labels(Map.of("support-group", "GP-PROJECT1-DEV,GP-PROJECT1-SUPPORT,GP-PROJECT1-OPS")) + .build()) + .build(); AccessControlEntry ace1Ns1Cluster1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); - - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), new KafkaAsyncExecutorConfig("cluster2")); - Mockito.when(namespaceService.listAll()) - .thenReturn(List.of(ns1Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)) - .thenReturn(List.of(ace1Ns1Cluster1)); - - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1-SUPPORT")) - .build(); - - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + .build()) + .build(); + + akhqClaimProviderController.managedClusters = + List.of(new ManagedClusterProperties("cluster1"), new ManagedClusterProperties("cluster2")); + when(namespaceService.listAll()) + .thenReturn(List.of(ns1Cluster1)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)) + .thenReturn(List.of(ace1Ns1Cluster1)); + + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1-SUPPORT")) + .build(); + + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); Assertions.assertEquals(actual.getGroups().size(), 1); - List 
groups = actual.getGroups().get("group"); + List groups = actual.getGroups().get("group"); Assertions.assertEquals(2, groups.size()); Assertions.assertEquals("topic-read", groups.get(0).getRole()); Assertions.assertEquals(List.of("^\\Qproject1_t.\\E.*$"), groups.get(0).getPatterns()); @@ -126,67 +129,72 @@ void generateClaimMultipleSupportGroups() { @Test void generateClaimNoPermissions() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder().name("ns1").cluster("cluster1") - .labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("ns1").cluster("cluster1") + .labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), new KafkaAsyncExecutorConfig("cluster2")); - Mockito.when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1)); + akhqClaimProviderController.managedClusters = + List.of(new ManagedClusterProperties("cluster1"), new ManagedClusterProperties("cluster2")); + when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT2-SUPPORT")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT2-SUPPORT")) + .build(); - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); Assertions.assertNull(actual.getGroups()); } @Test void generateClaimWithOptimizedClusters() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); Namespace ns1Cluster2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1").cluster("cluster2").labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1").cluster("cluster2").labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); AccessControlEntry ace1Ns1Cluster1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + .build()) + .build(); + + akhqClaimProviderController.managedClusters = + List.of(new ManagedClusterProperties("cluster1"), new ManagedClusterProperties("cluster2")); + when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1, ns1Cluster2)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)) + .thenReturn(List.of(ace1Ns1Cluster1)); AccessControlEntry ace1Ns1Cluster2 = 
AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster2").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); + .metadata(ObjectMeta.builder().cluster("cluster2").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + .build()) + .build(); - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), new KafkaAsyncExecutorConfig("cluster2")); - Mockito.when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1, ns1Cluster2)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)).thenReturn(List.of(ace1Ns1Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster2)).thenReturn(List.of(ace1Ns1Cluster2)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster2)) + .thenReturn(List.of(ace1Ns1Cluster2)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1-SUPPORT")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1-SUPPORT")) + .build(); - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); Assertions.assertEquals(1, actual.getGroups().size()); - List groups = actual.getGroups().get("group"); + List groups = actual.getGroups().get("group"); Assertions.assertEquals(2, groups.size()); Assertions.assertEquals("topic-read", groups.get(0).getRole()); Assertions.assertEquals(List.of("^\\Qproject1_t.\\E.*$"), groups.get(0).getPatterns()); @@ -197,48 +205,52 @@ void generateClaimWithOptimizedClusters() { @Test void generateClaimWithMultiplePatternsOnSameCluster() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) + .build()) + .build(); Namespace ns2Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns2").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns2").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) + .build()) + .build(); AccessControlEntry ace1Ns1Cluster1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + 
.resource("project1_t.") + .build()) + .build(); + + akhqClaimProviderController.managedClusters = + List.of(new ManagedClusterProperties("cluster1"), new ManagedClusterProperties("cluster2")); + when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1, ns2Cluster1)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)) + .thenReturn(List.of(ace1Ns1Cluster1)); AccessControlEntry ace2Ns2Cluster1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project2_t.") - .build()) - .build(); + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project2_t.") + .build()) + .build(); - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), new KafkaAsyncExecutorConfig("cluster2")); - Mockito.when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1, ns2Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)).thenReturn(List.of(ace1Ns1Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns2Cluster1)).thenReturn(List.of(ace2Ns2Cluster1)); + when(accessControlEntryService.findAllGrantedToNamespace(ns2Cluster1)) + .thenReturn(List.of(ace2Ns2Cluster1)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1&2-SUPPORT")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1&2-SUPPORT")) + .build(); - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); Assertions.assertEquals(actual.getGroups().size(), 1); - List groups = actual.getGroups().get("group"); + List groups = actual.getGroups().get("group"); Assertions.assertEquals(2, groups.size()); Assertions.assertEquals("topic-read", groups.get(0).getRole()); Assertions.assertEquals(List.of("^\\Qproject1_t.\\E.*$", "^\\Qproject2_t.\\E.*$"), groups.get(0).getPatterns()); @@ -249,48 +261,52 @@ void generateClaimWithMultiplePatternsOnSameCluster() { @Test void generateClaimWithMultipleGroups() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); Namespace ns1Cluster2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1").cluster("cluster2").labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1").cluster("cluster2").labels(Map.of("support-group", "GP-PROJECT1-SUPPORT")) + .build()) + .build(); AccessControlEntry ace1Cluster1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - 
.spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + .build()) + .build(); + + akhqClaimProviderController.managedClusters = + List.of(new ManagedClusterProperties("cluster1"), new ManagedClusterProperties("cluster2")); + when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1, ns1Cluster2)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)) + .thenReturn(List.of(ace1Cluster1)); AccessControlEntry ace1Cluster2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster2").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); + .metadata(ObjectMeta.builder().cluster("cluster2").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + .build()) + .build(); - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), new KafkaAsyncExecutorConfig("cluster2")); - Mockito.when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1, ns1Cluster2)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)).thenReturn(List.of(ace1Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster2)).thenReturn(List.of(ace1Cluster2)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster2)) + .thenReturn(List.of(ace1Cluster2)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1-SUPPORT")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1-SUPPORT")) + .build(); - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); Assertions.assertEquals(actual.getGroups().size(), 1); - List groups = actual.getGroups().get("group"); + List groups = actual.getGroups().get("group"); Assertions.assertEquals(2, groups.size()); Assertions.assertEquals("topic-read", groups.get(0).getRole()); Assertions.assertEquals(List.of("^\\Qproject1_t.\\E.*$"), groups.get(0).getPatterns()); @@ -301,48 +317,52 @@ void generateClaimWithMultipleGroups() { @Test void generateClaimWithPatternOnMultipleClusters() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) + .build()) + .build(); Namespace ns2Cluster2 = Namespace.builder() - 
.metadata(ObjectMeta.builder() - .name("ns2").cluster("cluster2").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns2").cluster("cluster2").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) + .build()) + .build(); AccessControlEntry ace1Ns1Cluster1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1_t.") - .build()) - .build(); + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1_t.") + .build()) + .build(); + + akhqClaimProviderController.managedClusters = + List.of(new ManagedClusterProperties("cluster1"), new ManagedClusterProperties("cluster2")); + when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1, ns2Cluster2)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)) + .thenReturn(List.of(ace1Ns1Cluster1)); AccessControlEntry ace1Ns2Cluster2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster2").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project2_t.") - .build()) - .build(); + .metadata(ObjectMeta.builder().cluster("cluster2").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project2_t.") + .build()) + .build(); - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), new KafkaAsyncExecutorConfig("cluster2")); - Mockito.when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1, ns2Cluster2)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)).thenReturn(List.of(ace1Ns1Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns2Cluster2)).thenReturn(List.of(ace1Ns2Cluster2)); + when(accessControlEntryService.findAllGrantedToNamespace(ns2Cluster2)) + .thenReturn(List.of(ace1Ns2Cluster2)); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1&2-SUPPORT")) - .build(); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1&2-SUPPORT")) + .build(); - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); Assertions.assertEquals(actual.getGroups().size(), 1); - List groups = actual.getGroups().get("group"); + List groups = actual.getGroups().get("group"); Assertions.assertEquals(4, groups.size()); Assertions.assertEquals("topic-read", groups.get(0).getRole()); Assertions.assertEquals(List.of("^\\Qproject1_t.\\E.*$"), groups.get(0).getPatterns()); @@ -359,192 +379,196 @@ void generateClaimWithPatternOnMultipleClusters() { } @Test - void 
generateClaimAndOptimizePatterns(){ + void generateClaimAndOptimizePatterns() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) - .build()) - .build(); - List inputACLs = List.of( - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project1.topic1") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project1.topic1") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project2.topic2") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project2.topic2a") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project2.topic3") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project2.") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project3.topic4") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resource("project3.topic5") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - 
.resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project3.") - .build()) - .build() + .metadata(ObjectMeta.builder() + .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) + .build()) + .build(); + + List inputAcls = List.of( + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project1.topic1") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project1.topic1") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project2.topic2") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project2.topic2a") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project2.topic3") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project2.") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project3.topic4") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resource("project3.topic5") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + 
.resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project3.") + .build()) + .build() ); - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), new KafkaAsyncExecutorConfig("cluster2")); - Mockito.when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)).thenReturn(inputACLs); + akhqClaimProviderController.managedClusters = + List.of(new ManagedClusterProperties("cluster1"), new ManagedClusterProperties("cluster2")); + when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)).thenReturn(inputAcls); - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1&2-SUPPORT")) - .build(); - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1&2-SUPPORT")) + .build(); + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); - List groups = actual.getGroups().get("group"); + List groups = actual.getGroups().get("group"); Assertions.assertEquals(3, groups.size()); Assertions.assertEquals("topic-read", groups.get(0).getRole()); Assertions.assertEquals( - List.of("^\\Qproject1.\\E.*$", "^\\Qproject2.topic2\\E$", "^\\Qproject2.topic2a\\E$", "^\\Qproject2.topic3\\E$", "^\\Qproject3.\\E.*$"), - groups.get(0).getPatterns() + List.of("^\\Qproject1.\\E.*$", "^\\Qproject2.topic2\\E$", "^\\Qproject2.topic2a\\E$", + "^\\Qproject2.topic3\\E$", "^\\Qproject3.\\E.*$"), + groups.get(0).getPatterns() ); Assertions.assertEquals("connect-rw", groups.get(1).getRole()); Assertions.assertEquals( - List.of("^\\Qproject1.topic1\\E$", "^\\Qproject2.\\E.*$"), - groups.get(1).getPatterns() + List.of("^\\Qproject1.topic1\\E$", "^\\Qproject2.\\E.*$"), + groups.get(1).getPatterns() ); Assertions.assertEquals("registry-read", groups.get(2).getRole()); Assertions.assertEquals( - List.of("^\\Qproject1.\\E.*$", "^\\Qproject2.topic2\\E$", "^\\Qproject2.topic2a\\E$", "^\\Qproject2.topic3\\E$", "^\\Qproject3.\\E.*$"), - groups.get(2).getPatterns() + List.of("^\\Qproject1.\\E.*$", "^\\Qproject2.topic2\\E$", "^\\Qproject2.topic2a\\E$", + "^\\Qproject2.topic3\\E$", "^\\Qproject3.\\E.*$"), + groups.get(2).getPatterns() ); } @Test - void generateClaimAndOptimizePatternsForDifferentClusters(){ + void generateClaimAndOptimizePatternsForDifferentClusters() { Namespace ns1Cluster1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) - .build()) - .build(); - - List inputACLs = List.of( - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster2").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - 
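// The pruning pinned by the previous test drops every LITERAL entry already covered
// by a PREFIXED entry of the same resource type. A standalone sketch under that
// assumption (helper name ours, not the controller's):
//
//     static List<AccessControlEntry> prune(List<AccessControlEntry> acls) {
//         var prefixed = acls.stream()
//             .filter(a -> a.getSpec().getResourcePatternType()
//                 == AccessControlEntry.ResourcePatternType.PREFIXED)
//             .toList();
//         return acls.stream()
//             .filter(a -> a.getSpec().getResourcePatternType()
//                     == AccessControlEntry.ResourcePatternType.PREFIXED
//                 || prefixed.stream().noneMatch(p ->
//                     p.getSpec().getResourceType() == a.getSpec().getResourceType()
//                         && a.getSpec().getResource().startsWith(p.getSpec().getResource())))
//             .toList();
//     }
//
// This is why project1.topic1 is dropped for TOPIC yet kept for CONNECT above:
// only a TOPIC-typed prefix "project1." exists in the input.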
.resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project1.") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project2.") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project3.") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster2").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project3.") - .build()) - .build(), - AccessControlEntry.builder() - .metadata(ObjectMeta.builder().cluster("cluster3").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resource("project3.") - .build()) - .build() + .metadata(ObjectMeta.builder() + .name("ns1").cluster("cluster1").labels(Map.of("support-group", "GP-PROJECT1&2-SUPPORT")) + .build()) + .build(); + + List inputAcls = List.of( + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster2").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project1.") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project2.") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project3.") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster2").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resource("project3.") + .build()) + .build(), + AccessControlEntry.builder() + .metadata(ObjectMeta.builder().cluster("cluster3").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + 
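// The cluster dimension asserted at the end of this test works the same way:
// identical patterns granted on several clusters collapse into one binding keyed by
// that cluster set ("project3." ends up with clusters ["^cluster1$", "^cluster2$",
// "^cluster3$"]), and the three distinct cluster sets times the topic-read and
// registry-read roles explain the six groups expected below; cluster4, which grants
// nothing, never appears.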
.resource("project3.") + .build()) + .build() ); - akhqClaimProviderController.managedClusters = List.of(new KafkaAsyncExecutorConfig("cluster1"), - new KafkaAsyncExecutorConfig("cluster2"), new KafkaAsyncExecutorConfig("cluster3") - , new KafkaAsyncExecutorConfig("cluster4")); - Mockito.when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1)); - Mockito.when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)).thenReturn(inputACLs); - - AkhqClaimProviderController.AKHQClaimRequest request = AkhqClaimProviderController.AKHQClaimRequest.builder() - .groups(List.of("GP-PROJECT1&2-SUPPORT")) - .build(); - AkhqClaimProviderController.AKHQClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); - - List groups = actual.getGroups().get("group"); + akhqClaimProviderController.managedClusters = List.of(new ManagedClusterProperties("cluster1"), + new ManagedClusterProperties("cluster2"), new ManagedClusterProperties("cluster3"), + new ManagedClusterProperties("cluster4")); + when(namespaceService.listAll()).thenReturn(List.of(ns1Cluster1)); + when(accessControlEntryService.findAllGrantedToNamespace(ns1Cluster1)).thenReturn(inputAcls); + + AkhqClaimProviderController.AkhqClaimRequest request = AkhqClaimProviderController.AkhqClaimRequest.builder() + .groups(List.of("GP-PROJECT1&2-SUPPORT")) + .build(); + AkhqClaimProviderController.AkhqClaimResponseV3 actual = akhqClaimProviderController.generateClaimV3(request); + + List groups = actual.getGroups().get("group"); Assertions.assertEquals(6, groups.size()); Assertions.assertEquals("topic-read", groups.get(0).getRole()); Assertions.assertEquals(List.of("^\\Qproject1.\\E.*$"), groups.get(0).getPatterns()); @@ -554,6 +578,6 @@ void generateClaimAndOptimizePatternsForDifferentClusters(){ Assertions.assertEquals(List.of("^cluster1$"), groups.get(1).getClusters()); Assertions.assertEquals("topic-read", groups.get(2).getRole()); Assertions.assertEquals(List.of("^\\Qproject3.\\E.*$"), groups.get(2).getPatterns()); - Assertions.assertEquals(List.of("^cluster1$","^cluster2$","^cluster3$"), groups.get(2).getClusters()); + Assertions.assertEquals(List.of("^cluster1$", "^cluster2$", "^cluster3$"), groups.get(2).getClusters()); } } diff --git a/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterControllerTest.java index 318de954..3f5dee12 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterControllerTest.java @@ -1,5 +1,15 @@ package com.michelin.ns4kafka.controllers; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.controllers.connect.ConnectClusterController; import com.michelin.ns4kafka.models.AuditLog; import com.michelin.ns4kafka.models.Namespace; @@ -17,6 +27,8 @@ import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.security.utils.SecurityService; +import java.util.List; +import java.util.Optional; import 
org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.ArgumentMatchers; @@ -26,13 +38,9 @@ import reactor.core.publisher.Mono; import reactor.test.StepVerifier; -import java.util.List; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; - +/** + * Connect cluster controller test. + */ @ExtendWith(MockitoExtension.class) class ConnectClusterControllerTest { @Mock @@ -53,172 +61,151 @@ class ConnectClusterControllerTest { @Mock ApplicationEventPublisher applicationEventPublisher; - /** - * Test connect clusters listing when namespace is empty - */ @Test void listEmptyConnectClusters() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.findAllByNamespaceOwner(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); List actual = connectClusterController.list("test"); assertTrue(actual.isEmpty()); } - /** - * Test connect clusters listing - */ @Test void listMultipleConnectClusters() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.findAllByNamespaceOwner(ns)) - .thenReturn(List.of( - ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(), - ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster2") - .build()) - .build())); + .thenReturn(List.of( + ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(), + ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster2") + .build()) + .build())); List actual = connectClusterController.list("test"); assertEquals(2, actual.size()); } - /** - * Test get connect cluster by name when it does not exist - */ @Test void getConnectClusterEmpty() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.findByNamespaceAndNameOwner(ns, "missing")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); Optional actual = connectClusterController.getConnectCluster("test", "missing"); assertTrue(actual.isEmpty()); } - /** - * Test get connect cluster by name - */ @Test void getConnectCluster() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")) - .thenReturn(Optional.of( - ConnectCluster.builder() - 
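// Throughout this file the diff drops the per-test Javadoc blocks ("Test get
// connect cluster by name", etc.) in favour of a single class-level Javadoc, leaving
// the @Test method names to carry the description; this is consistent with the
// checkstyle adoption this PR introduces.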
.metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build())); + .thenReturn(Optional.of( + ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build())); Optional actual = connectClusterController.getConnectCluster("test", "connect-cluster"); assertTrue(actual.isPresent()); assertEquals("connect-cluster", actual.get().getMetadata().getName()); } - /** - * Test connect cluster deletion when namespace is not owner - */ @Test void deleteConnectClusterNotOwned() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) - .thenReturn(false); + .thenReturn(false); assertThrows(ResourceValidationException.class, - () -> connectClusterController.delete("test", "connect-cluster", false)); + () -> connectClusterController.delete("test", "connect-cluster", false)); } - /** - * Test connect cluster deletion when not found - */ @Test void deleteConnectClusterNotFound() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) - .thenReturn(true); + .thenReturn(true); when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); HttpResponse actual = connectClusterController.delete("test", "connect-cluster", false); assertEquals(HttpStatus.NOT_FOUND, actual.getStatus()); } - /** - * Test connect cluster deletion when namespace is owner - */ @Test void deleteConnectClusterOwned() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) - .thenReturn(true); + .thenReturn(true); when(connectorService.findAllByConnectCluster(ns, "connect-cluster")) - .thenReturn(List.of()); + .thenReturn(List.of()); when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")) - .thenReturn(Optional.of(connectCluster)); + .thenReturn(Optional.of(connectCluster)); doNothing().when(connectClusterService).delete(connectCluster); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); @@ -228,31 +215,28 @@ void deleteConnectClusterOwned() { assertEquals(HttpStatus.NO_CONTENT, actual.getStatus()); } - /** - * Test connect cluster deletion in dry run mode - */ @Test void deleteConnectClusterOwnedDryRun() { 
Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) - .thenReturn(true); + .thenReturn(true); when(connectorService.findAllByConnectCluster(ns, "connect-cluster")) - .thenReturn(List.of()); + .thenReturn(List.of()); when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")) - .thenReturn(Optional.of(connectCluster)); + .thenReturn(Optional.of(connectCluster)); HttpResponse actual = connectClusterController.delete("test", "connect-cluster", true); assertEquals(HttpStatus.NO_CONTENT, actual.getStatus()); @@ -260,53 +244,50 @@ void deleteConnectClusterOwnedDryRun() { verify(connectClusterService, never()).delete(any()); } - /** - * Test connect cluster deletion when it has connectors deployed on it - */ @Test void deleteConnectClusterWithConnectors() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")) - .thenReturn(true); + .thenReturn(true); when(connectorService.findAllByConnectCluster(ns, "connect-cluster")) - .thenReturn(List.of(connector)); + .thenReturn(List.of(connector)); ResourceValidationException result = assertThrows(ResourceValidationException.class, - () -> connectClusterController.delete("test", "connect-cluster", false)); + () -> connectClusterController.delete("test", "connect-cluster", false)); assertEquals(1, result.getValidationErrors().size()); - assertEquals("The Connect cluster connect-cluster has 1 deployed connector(s): connect1. Please remove the associated connector(s) before deleting it.", result.getValidationErrors().get(0)); + assertEquals( + "The Connect cluster connect-cluster has 1 deployed connector(s): connect1. 
" + + "Please remove the associated connector(s) before deleting it.", + result.getValidationErrors().get(0)); } - /** - * Validate Connect cluster creation - */ @Test void createNewConnectCluster() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); @@ -319,180 +300,169 @@ void createNewConnectCluster() { when(connectClusterService.create(connectCluster)).thenReturn(connectCluster); StepVerifier.create(connectClusterController.apply("test", connectCluster, false)) - .consumeNextWith(response -> { - assertEquals("created", response.header("X-Ns4kafka-Result")); - assertNotNull(response.body()); - assertEquals("connect-cluster", response.body().getMetadata().getName()); - }) - .verifyComplete(); + .consumeNextWith(response -> { + assertEquals("created", response.header("X-Ns4kafka-Result")); + assertNotNull(response.body()); + assertEquals("connect-cluster", response.body().getMetadata().getName()); + }) + .verifyComplete(); } - /** - * Validate Connect cluster creation being not owner - */ @Test void createNewConnectClusterNotOwner() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(false); when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(Mono.just(List.of())); StepVerifier.create(connectClusterController.apply("test", connectCluster, false)) - .consumeErrorWith(error -> { - assertEquals(ResourceValidationException.class, error.getClass()); - assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Namespace not owner of this Connect cluster connect-cluster.", ((ResourceValidationException) error).getValidationErrors().get(0)); - }) - .verify(); + .consumeErrorWith(error -> { + assertEquals(ResourceValidationException.class, error.getClass()); + assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); + assertEquals("Namespace not owner of this Connect cluster connect-cluster.", + ((ResourceValidationException) error).getValidationErrors().get(0)); + }) + .verify(); } - /** - * 
Validate Connect cluster creation being not owner - */ @Test void createNewConnectClusterValidationError() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); - when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(Mono.just(List.of("Error occurred"))); + when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn( + Mono.just(List.of("Error occurred"))); StepVerifier.create(connectClusterController.apply("test", connectCluster, false)) - .consumeErrorWith(error -> { - assertEquals(ResourceValidationException.class, error.getClass()); - assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Error occurred", ((ResourceValidationException) error).getValidationErrors().get(0)); - }) - .verify(); + .consumeErrorWith(error -> { + assertEquals(ResourceValidationException.class, error.getClass()); + assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); + assertEquals("Error occurred", ((ResourceValidationException) error).getValidationErrors().get(0)); + }) + .verify(); } - /** - * Validate Connect cluster updated when unchanged - */ @Test void updateConnectClusterUnchanged() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(Mono.just(List.of())); - when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn(Optional.of(connectCluster)); + when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn( + Optional.of(connectCluster)); StepVerifier.create(connectClusterController.apply("test", connectCluster, false)) - .consumeNextWith(response -> { - assertEquals("unchanged", response.header("X-Ns4kafka-Result")); - assertEquals(connectCluster, response.body()); - }) - .verifyComplete(); + .consumeNextWith(response -> { + assertEquals("unchanged", response.header("X-Ns4kafka-Result")); + assertEquals(connectCluster, 
response.body()); + }) + .verifyComplete(); verify(connectClusterService, never()).create(ArgumentMatchers.any()); } - /** - * Validate Connect cluster updated when changed - */ @Test void updateConnectClusterChanged() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); ConnectCluster connectClusterChanged = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://before") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://before") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); when(connectClusterService.validateConnectClusterCreation(connectCluster)).thenReturn(Mono.just(List.of())); - when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn(Optional.of(connectClusterChanged)); + when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn( + Optional.of(connectClusterChanged)); when(connectClusterService.create(connectCluster)).thenReturn(connectCluster); StepVerifier.create(connectClusterController.apply("test", connectCluster, false)) - .consumeNextWith(response -> { - assertEquals("changed", response.header("X-Ns4kafka-Result")); - assertNotNull(response.body()); - assertEquals("connect-cluster", response.body().getMetadata().getName()); - }) - .verifyComplete(); + .consumeNextWith(response -> { + assertEquals("changed", response.header("X-Ns4kafka-Result")); + assertNotNull(response.body()); + assertEquals("connect-cluster", response.body().getMetadata().getName()); + }) + .verifyComplete(); } - /** - * Validate Connect cluster creation in dry run mode - */ @Test void createConnectClusterDryRun() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceOwnerOfConnectCluster(ns, "connect-cluster")).thenReturn(true); @@ -500,32 
+470,29 @@ void createConnectClusterDryRun() { when(connectClusterService.findByNamespaceAndNameOwner(ns, "connect-cluster")).thenReturn(Optional.empty()); StepVerifier.create(connectClusterController.apply("test", connectCluster, true)) - .consumeNextWith(response -> assertEquals("created", response.header("X-Ns4kafka-Result"))) - .verifyComplete(); + .consumeNextWith(response -> assertEquals("created", response.header("X-Ns4kafka-Result"))) + .verifyComplete(); verify(connectClusterService, never()).create(connectCluster); } - /** - * List available vault for connect clusters allowed without any vault config. - */ @Test void listVaultNoConnectClusterAllowedWithAes256Config() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder().build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder().build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.findAllByNamespaceWrite(ns)).thenReturn(List.of(connectCluster)); @@ -534,57 +501,52 @@ void listVaultNoConnectClusterAllowedWithAes256Config() { assertTrue(actual.isEmpty()); } - /** - * List available vault for connect clusters allowed with vault config. 
- */ @Test void listVaultConnectClusterAllowedWithAes256Config() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder().build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder().build()) + .build(); ConnectCluster connectClusterAes256 = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster-aes256") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key("myKeyEncryption") - .aes256Salt("p8t42EhY9z2eSUdpGeq7HX7RboMrsJAhUnu3EEJJVS") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster-aes256") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key("myKeyEncryption") + .aes256Salt("p8t42EhY9z2eSUdpGeq7HX7RboMrsJAhUnu3EEJJVS") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); - when(connectClusterService.findAllByNamespaceWrite(ns)).thenReturn(List.of(connectCluster, connectClusterAes256)); + when(connectClusterService.findAllByNamespaceWrite(ns)).thenReturn( + List.of(connectCluster, connectClusterAes256)); List actual = connectClusterController.listVaults("test"); assertEquals(1, actual.size()); } - /** - * Vault password on not allowed kafka connect cluster. - */ @Test void vaultOnNonAllowedConnectCluster() { String connectClusterName = "connect-cluster-na"; Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceAllowedForConnectCluster(ns, connectClusterName)).thenReturn(false); @@ -592,67 +554,64 @@ void vaultOnNonAllowedConnectCluster() { var secrets = List.of("secret"); ResourceValidationException result = assertThrows(ResourceValidationException.class, - () -> connectClusterController.vaultPassword("test", connectClusterName, secrets)); + () -> connectClusterController.vaultPassword("test", connectClusterName, secrets)); assertEquals(1, result.getValidationErrors().size()); - assertEquals("Namespace is not allowed to use this Connect cluster connect-cluster-na.", result.getValidationErrors().get(0)); + assertEquals("Namespace is not allowed to use this Connect cluster connect-cluster-na.", + result.getValidationErrors().get(0)); } - /** - * Vault password on not valid kafka connect cluster aes256 config. 
- */ @Test - void vaultOnNotValidAES256ConnectCluster() { + void vaultOnNotValidAes256ConnectCluster() { String connectClusterName = "connect-cluster-aes256"; Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceAllowedForConnectCluster(ns, connectClusterName)).thenReturn(true); - when(connectClusterService.validateConnectClusterVault(ns, connectClusterName)).thenReturn(List.of("Error config.")); + when(connectClusterService.validateConnectClusterVault(ns, connectClusterName)).thenReturn( + List.of("Error config.")); var secrets = List.of("secret"); ResourceValidationException result = assertThrows(ResourceValidationException.class, - () -> connectClusterController.vaultPassword("test", connectClusterName, secrets)); + () -> connectClusterController.vaultPassword("test", connectClusterName, secrets)); assertEquals(1, result.getValidationErrors().size()); assertEquals("Error config.", result.getValidationErrors().get(0)); } - /** - * Vault password on not valid kafka connect cluster aes256 config. - */ @Test - void vaultOnValidAES256ConnectCluster() { + void vaultOnValidAes256ConnectCluster() { String connectClusterName = "connect-cluster-aes256"; Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectClusterService.isNamespaceAllowedForConnectCluster(ns, connectClusterName)).thenReturn(true); when(connectClusterService.validateConnectClusterVault(ns, connectClusterName)).thenReturn(List.of()); when(connectClusterService.vaultPassword(ns, connectClusterName, List.of("secret"))) - .thenReturn(List.of(VaultResponse.builder() - .spec(VaultResponse.VaultResponseSpec.builder() - .clearText("secret") - .encrypted("encryptedSecret") - .build()) - .build() - )); - - final List actual = connectClusterController.vaultPassword("test", connectClusterName, List.of("secret")); + .thenReturn(List.of(VaultResponse.builder() + .spec(VaultResponse.VaultResponseSpec.builder() + .clearText("secret") + .encrypted("encryptedSecret") + .build()) + .build() + )); + + final List actual = + connectClusterController.vaultPassword("test", connectClusterName, List.of("secret")); assertEquals("secret", actual.get(0).getSpec().getClearText()); assertEquals("encryptedSecret", actual.get(0).getSpec().getEncrypted()); } diff --git a/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterNonNamespacedControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterNonNamespacedControllerTest.java index 3838717c..e1068962 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterNonNamespacedControllerTest.java +++ 
b/src/test/java/com/michelin/ns4kafka/controllers/ConnectClusterNonNamespacedControllerTest.java @@ -1,9 +1,14 @@ package com.michelin.ns4kafka.controllers; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.controllers.connect.ConnectClusterNonNamespacedController; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.connect.cluster.ConnectCluster; import com.michelin.ns4kafka.services.ConnectClusterService; +import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; @@ -12,12 +17,9 @@ import reactor.core.publisher.Flux; import reactor.test.StepVerifier; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.Mockito.when; - +/** + * Connect cluster non namespaced controller test. + */ @ExtendWith(MockitoExtension.class) class ConnectClusterNonNamespacedControllerTest { @Mock @@ -26,15 +28,12 @@ class ConnectClusterNonNamespacedControllerTest { @InjectMocks ConnectClusterNonNamespacedController connectClusterNonNamespacedController; - /** - * Should list all Kafka Connects - */ @Test void shouldListAll() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .build(); when(connectClusterService.findAll(anyBoolean())).thenReturn(Flux.fromIterable(List.of(connectCluster))); diff --git a/src/test/java/com/michelin/ns4kafka/controllers/ConnectorControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/ConnectorControllerTest.java index 7498c141..b92da632 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/ConnectorControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/ConnectorControllerTest.java @@ -1,5 +1,15 @@ package com.michelin.ns4kafka.controllers; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AuditLog; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; @@ -15,7 +25,10 @@ import io.micronaut.http.HttpStatus; import io.micronaut.http.client.exceptions.HttpClientResponseException; import io.micronaut.security.utils.SecurityService; -import org.junit.jupiter.api.Assertions; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.ArgumentMatchers; @@ -26,15 +39,6 @@ import reactor.core.publisher.Mono; import reactor.test.StepVerifier; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; - 
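Every import hunk in this patch applies the same layout: explicit static imports first, then regular imports with java.* packages ahead of third-party ones, replacing the previous wildcard (.*) imports. A minimal sketch of the resulting header, using illustrative members rather than the exact list from any one file:

// Sketch of the import order these hunks converge on; the imported
// members below are illustrative, not taken from any single file.
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.when;

import java.util.List;
import org.junit.jupiter.api.Test;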
@ExtendWith(MockitoExtension.class) class ConnectorControllerTest { @Mock @@ -55,141 +59,124 @@ class ConnectorControllerTest { @Mock ResourceQuotaService resourceQuotaService; - /** - * Test connector listing when namespace is empty - */ @Test void listEmptyConnectors() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); List actual = connectorController.list("test"); assertTrue(actual.isEmpty()); } - /** - * Test connector listing - */ @Test void listMultipleConnectors() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of( - Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), - Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); + .thenReturn(List.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), + Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); List actual = connectorController.list("test"); assertEquals(2, actual.size()); } - /** - * Test get connector by name when it does not exist - */ @Test void getConnectorEmpty() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.findByName(ns, "missing")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); Optional actual = connectorController.getConnector("test", "missing"); assertTrue(actual.isEmpty()); } - /** - * Test get connector by name - */ @Test void getConnector() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.findByName(ns, "connect1")) - .thenReturn(Optional.of( - Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build())); + .thenReturn(Optional.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build())); Optional actual = connectorController.getConnector("test", "connect1"); assertTrue(actual.isPresent()); assertEquals("connect1", actual.get().getMetadata().getName()); } - /** - * Test connector deletion when namespace is not owner - */ @Test void deleteConnectorNotOwned() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); 
when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(false); + .thenReturn(false); StepVerifier.create(connectorController.deleteConnector("test", "connect1", false)) .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Namespace not owner of this connector connect1.", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Namespace not owner of this connector connect1.", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); } - /** - * Test connector deletion when namespace is owner - */ @Test void deleteConnectorOwned() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); - when(connectorService.findByName(ns,"connect1")) - .thenReturn(Optional.of(connector)); - when(connectorService.delete(ns,connector)) - .thenReturn(Mono.just(HttpResponse.noContent())); + .thenReturn(true); + when(connectorService.findByName(ns, "connect1")) + .thenReturn(Optional.of(connector)); + when(connectorService.delete(ns, connector)) + .thenReturn(Mono.just(HttpResponse.noContent())); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); @@ -199,25 +186,22 @@ void deleteConnectorOwned() { .verifyComplete(); } - /** - * Test connector deletion in dry run mode - */ @Test void deleteConnectorOwnedDryRun() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(connectorService.findByName(ns,"connect1")) - .thenReturn(Optional.of(connector)); + .thenReturn(Optional.of(ns)); + when(connectorService.findByName(ns, "connect1")) + .thenReturn(Optional.of(connector)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); + .thenReturn(true); StepVerifier.create(connectorController.deleteConnector("test", "connect1", true)) .consumeNextWith(response -> assertEquals(HttpStatus.NO_CONTENT, response.getStatus())) @@ -226,147 +210,135 @@ void deleteConnectorOwnedDryRun() { verify(connectorService, never()).delete(any(), any()); } - /** - * Test connector deletion when connector is not found - */ @Test void deleteConnectorNotFound() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); 
when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(connectorService.findByName(ns,"connect1")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.of(ns)); + when(connectorService.findByName(ns, "connect1")) + .thenReturn(Optional.empty()); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); + .thenReturn(true); StepVerifier.create(connectorController.deleteConnector("test", "connect1", true)) - .consumeNextWith(response -> assertEquals(HttpStatus.NOT_FOUND, response.getStatus())) - .verifyComplete(); + .consumeNextWith(response -> assertEquals(HttpStatus.NOT_FOUND, response.getStatus())) + .verifyComplete(); verify(connectorService, never()).delete(any(), any()); } - /** - * Test connector creation when namespace is not owner - */ @Test void createConnectorNotOwner() { Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(false); + .thenReturn(false); StepVerifier.create(connectorController.apply("test", connector, false)) .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Namespace not owner of this connector connect1.", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Namespace not owner of this connector connect1.", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); } - /** - * Test connector creation when there are local errors - */ @Test void createConnectorLocalErrors() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); + .thenReturn(true); when(connectorService.validateLocally(ns, connector)) - .thenReturn(Mono.just(List.of("Local Validation Error 1"))); + .thenReturn(Mono.just(List.of("Local Validation Error 1"))); StepVerifier.create(connectorController.apply("test", connector, false)) .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Local Validation Error 1", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Local Validation Error 1", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); } - /** - * Test connector creation when there are remote errors - */ 
@Test void createConnectorRemoteErrors() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); + .thenReturn(true); when(connectorService.validateLocally(ns, connector)) - .thenReturn(Mono.just(List.of())); + .thenReturn(Mono.just(List.of())); when(connectorService.validateRemotely(ns, connector)) - .thenReturn(Mono.just(List.of("Remote Validation Error 1"))); + .thenReturn(Mono.just(List.of("Remote Validation Error 1"))); StepVerifier.create(connectorController.apply("test", connector, false)) .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Remote Validation Error 1", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Remote Validation Error 1", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); } - /** - * Test connector creation on success - */ @Test void createConnectorSuccess() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) + .build(); Connector expected = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(Map.of("name", "connect1")).build()) - .status(Connector.ConnectorStatus.builder().state(Connector.TaskState.UNASSIGNED).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder().config(Map.of("name", "connect1")).build()) + .status(Connector.ConnectorStatus.builder().state(Connector.TaskState.UNASSIGNED).build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")).thenReturn(true); when(connectorService.validateLocally(ns, connector)).thenReturn(Mono.just(List.of())); @@ -376,7 +348,7 @@ void createConnectorSuccess() { when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); when(connectorService.createOrUpdate(connector)) - .thenReturn(expected); + .thenReturn(expected); StepVerifier.create(connectorController.apply("test", connector, false)) .consumeNextWith(response -> { @@ -387,30 +359,27 @@ void createConnectorSuccess() { .verifyComplete(); } - /** - * Test connector 
creation when there are validation failures - */ @Test void createConnectorFailQuotaValidation() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); + .thenReturn(true); when(connectorService.validateLocally(ns, connector)) - .thenReturn(Mono.just(List.of())); + .thenReturn(Mono.just(List.of())); when(connectorService.validateRemotely(ns, connector)) - .thenReturn(Mono.just(List.of())); + .thenReturn(Mono.just(List.of())); when(resourceQuotaService.validateConnectorQuota(ns)).thenReturn(List.of("Quota error")); StepVerifier.create(connectorController.apply("test", connector, false)) @@ -422,30 +391,27 @@ void createConnectorFailQuotaValidation() { .verify(); } - /** - * Test connector creation unchanged - */ @Test void createConnectorSuccessAlreadyExists() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) + .build(); Connector expected = Connector.builder() - .metadata(ObjectMeta.builder() - .namespace("test") - .cluster("local") - .name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(Map.of("name", "connect1")).build()) - .status(Connector.ConnectorStatus.builder().state(Connector.TaskState.UNASSIGNED).build()) - .build(); + .metadata(ObjectMeta.builder() + .namespace("test") + .cluster("local") + .name("connect1").build()) + .spec(Connector.ConnectorSpec.builder().config(Map.of("name", "connect1")).build()) + .status(Connector.ConnectorStatus.builder().state(Connector.TaskState.UNASSIGNED).build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")).thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")).thenReturn(true); @@ -461,49 +427,47 @@ void createConnectorSuccessAlreadyExists() { }) .verifyComplete(); - verify(connectorService,never()).createOrUpdate(ArgumentMatchers.any()); + verify(connectorService, never()).createOrUpdate(ArgumentMatchers.any()); } - /** - * Test connector change - */ @Test void createConnectorSuccessChanged() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) - .build(); - Connector connectorOld = Connector.builder().metadata(ObjectMeta.builder().name("connect1").labels(Map.of("label", "labelValue")).build()).build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + 
.spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) + .build(); + Connector connectorOld = Connector.builder() + .metadata(ObjectMeta.builder().name("connect1").labels(Map.of("label", "labelValue")).build()).build(); Connector expected = Connector.builder() - .metadata(ObjectMeta.builder() - .name("connect1") - .labels(Map.of("label", "labelValue")) - .build()) - .spec(Connector.ConnectorSpec.builder().config(Map.of("name", "connect1")).build()) - .status(Connector.ConnectorStatus.builder().state(Connector.TaskState.UNASSIGNED).build()) - .build(); + .metadata(ObjectMeta.builder() + .name("connect1") + .labels(Map.of("label", "labelValue")) + .build()) + .spec(Connector.ConnectorSpec.builder().config(Map.of("name", "connect1")).build()) + .status(Connector.ConnectorStatus.builder().state(Connector.TaskState.UNASSIGNED).build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); + .thenReturn(true); when(connectorService.validateLocally(ns, connector)) - .thenReturn(Mono.just(List.of())); + .thenReturn(Mono.just(List.of())); when(connectorService.validateRemotely(ns, connector)) - .thenReturn(Mono.just(List.of())); + .thenReturn(Mono.just(List.of())); when(connectorService.findByName(ns, "connect1")) - .thenReturn(Optional.of(connectorOld)); + .thenReturn(Optional.of(connectorOld)); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); when(connectorService.createOrUpdate(connector)) - .thenReturn(expected); + .thenReturn(expected); StepVerifier.create(connectorController.apply("test", connector, false)) .consumeNextWith(response -> { @@ -514,30 +478,27 @@ void createConnectorSuccessChanged() { .verifyComplete(); } - /** - * Test connector creation in dry mode - */ @Test void createConnectorDryRun() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder().config(new HashMap<>()).build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); + .thenReturn(true); when(connectorService.validateLocally(ns, connector)) - .thenReturn(Mono.just(List.of())); + .thenReturn(Mono.just(List.of())); when(connectorService.validateRemotely(ns, connector)) - .thenReturn(Mono.just(List.of())); + .thenReturn(Mono.just(List.of())); StepVerifier.create(connectorController.apply("test", connector, true)) .consumeNextWith(response -> assertEquals("created", response.header("X-Ns4kafka-Result"))) @@ -546,26 +507,23 @@ void createConnectorDryRun() { 
verify(connectorService, never()).createOrUpdate(connector); } - /** - * Test connector import - */ @Test void importConnector() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Connector connector1 = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); Connector connector2 = Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - + .thenReturn(Optional.of(ns)); + when(connectorService.listUnsynchronizedConnectors(ns)) - .thenReturn(Flux.fromIterable(List.of(connector1, connector2))); - + .thenReturn(Flux.fromIterable(List.of(connector1, connector2))); + when(connectorService.createOrUpdate(connector1)).thenReturn(connector1); when(connectorService.createOrUpdate(connector2)).thenReturn(connector2); @@ -575,26 +533,23 @@ void importConnector() { .verifyComplete(); } - /** - * Test connector import in dry mode - */ @Test void importConnectorDryRun() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Connector connector1 = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); Connector connector2 = Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build(); Connector connector3 = Connector.builder().metadata(ObjectMeta.builder().name("connect3").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.listUnsynchronizedConnectors(ns)) - .thenReturn(Flux.fromIterable(List.of(connector1, connector2))); + .thenReturn(Flux.fromIterable(List.of(connector1, connector2))); StepVerifier.create(connectorController.importResources("test", true)) .consumeNextWith(connect1 -> assertEquals("connect1", connect1.getMetadata().getName())) @@ -606,93 +561,89 @@ void importConnectorDryRun() { verify(connectorService, never()).createOrUpdate(connector3); } - /** - * Test connector restart when namespace is not owner - */ @Test void restartConnectorNotOwned() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(false); + .thenReturn(false); ChangeConnectorState restart = ChangeConnectorState.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder() + .action(ChangeConnectorState.ConnectorAction.restart).build()) + .build(); StepVerifier.create(connectorController.changeState("test", "connect1", restart)) .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) 
error).getValidationErrors().size()); - assertEquals("Namespace not owner of this connector connect1.", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Namespace not owner of this connector connect1.", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); } - /** - * Test connector restart when it does not exist - */ @Test void restartConnectorNotExists() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); - when(connectorService.findByName(ns,"connect1")) - .thenReturn(Optional.empty()); + .thenReturn(true); + when(connectorService.findByName(ns, "connect1")) + .thenReturn(Optional.empty()); ChangeConnectorState restart = ChangeConnectorState.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder() + .action(ChangeConnectorState.ConnectorAction.restart).build()) + .build(); StepVerifier.create(connectorController.changeState("test", "connect1", restart)) .consumeNextWith(response -> assertEquals(HttpStatus.NOT_FOUND, response.getStatus())) .verifyComplete(); - verify(connectorService,never()).restart(ArgumentMatchers.any(), ArgumentMatchers.any()); + verify(connectorService, never()).restart(ArgumentMatchers.any(), ArgumentMatchers.any()); } - /** - * Test connector restart throwing an exception - */ @Test void restartConnectorException() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); - when(connectorService.findByName(ns,"connect1")) - .thenReturn(Optional.of(connector)); - when(connectorService.restart(ArgumentMatchers.any(),ArgumentMatchers.any())) - .thenReturn(Mono.error(new HttpClientResponseException("Rebalancing", HttpResponse.status(HttpStatus.CONFLICT)))); + .thenReturn(true); + when(connectorService.findByName(ns, "connect1")) + .thenReturn(Optional.of(connector)); + when(connectorService.restart(ArgumentMatchers.any(), ArgumentMatchers.any())) + .thenReturn( + Mono.error(new HttpClientResponseException("Rebalancing", HttpResponse.status(HttpStatus.CONFLICT)))); ChangeConnectorState restart = ChangeConnectorState.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder() + .action(ChangeConnectorState.ConnectorAction.restart).build()) + .build(); 
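        // restart() is stubbed above to fail with a 409 HttpClientResponseException;
        // note that the verification below still uses consumeNextWith(...).verifyComplete(),
        // i.e. the controller is expected to translate the client error into a normal
        // HTTP response rather than letting the Mono terminate with an error signal.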
StepVerifier.create(connectorController.changeState("test", "connect1", restart)) .consumeNextWith(response -> { @@ -704,32 +655,30 @@ void restartConnectorException() { .verifyComplete(); } - /** - * Test connector restart when namespace is owner - */ @Test void restartConnectorOwned() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); - when(connectorService.findByName(ns,"connect1")) - .thenReturn(Optional.of(connector)); - when(connectorService.restart(ArgumentMatchers.any(),ArgumentMatchers.any())) - .thenReturn(Mono.just(HttpResponse.noContent())); + .thenReturn(true); + when(connectorService.findByName(ns, "connect1")) + .thenReturn(Optional.of(connector)); + when(connectorService.restart(ArgumentMatchers.any(), ArgumentMatchers.any())) + .thenReturn(Mono.just(HttpResponse.noContent())); ChangeConnectorState changeConnectorState = ChangeConnectorState.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder() + .action(ChangeConnectorState.ConnectorAction.restart).build()) + .build(); StepVerifier.create(connectorController.changeState("test", "connect1", changeConnectorState)) .consumeNextWith(response -> { @@ -741,35 +690,32 @@ void restartConnectorOwned() { .verifyComplete(); } - /** - * Test connector pause when namespace is owner - */ @Test void pauseConnectorOwned() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1")) - .thenReturn(true); - when(connectorService.findByName(ns,"connect1")) - .thenReturn(Optional.of(connector)); - when(connectorService.pause(ArgumentMatchers.any(),ArgumentMatchers.any())) - .thenReturn(Mono.just(HttpResponse.noContent())); + .thenReturn(true); + when(connectorService.findByName(ns, "connect1")) + .thenReturn(Optional.of(connector)); + when(connectorService.pause(ArgumentMatchers.any(), ArgumentMatchers.any())) + .thenReturn(Mono.just(HttpResponse.noContent())); ChangeConnectorState changeConnectorState = ChangeConnectorState.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(ChangeConnectorState.ChangeConnectorStateSpec - .builder() - .action(ChangeConnectorState.ConnectorAction.pause) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(ChangeConnectorState.ChangeConnectorStateSpec + .builder() + .action(ChangeConnectorState.ConnectorAction.pause) + .build()) + .build(); 
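        // Same shape as the restart test: pause() is stubbed to answer 204 No Content,
        // and the state change is driven through changeState() with action = pause.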
         StepVerifier.create(connectorController.changeState("test", "connect1", changeConnectorState))
             .consumeNextWith(response -> {
@@ -781,34 +727,31 @@ void pauseConnectorOwned() {
             .verifyComplete();
     }
 
-    /**
-     * Test connector resume when namespace is owner
-     */
     @Test
     void resumeConnectorOwned() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         Connector connector = Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build();
 
         when(namespaceService.findByName("test"))
-                .thenReturn(Optional.of(ns));
+            .thenReturn(Optional.of(ns));
         when(connectorService.isNamespaceOwnerOfConnect(ns, "connect1"))
-                .thenReturn(true);
-        when(connectorService.findByName(ns,"connect1"))
-                .thenReturn(Optional.of(connector));
-        when(connectorService.resume(ArgumentMatchers.any(),ArgumentMatchers.any()))
-                .thenReturn(Mono.just(HttpResponse.noContent()));
+            .thenReturn(true);
+        when(connectorService.findByName(ns, "connect1"))
+            .thenReturn(Optional.of(connector));
+        when(connectorService.resume(ArgumentMatchers.any(), ArgumentMatchers.any()))
+            .thenReturn(Mono.just(HttpResponse.noContent()));
 
         ChangeConnectorState changeConnectorState = ChangeConnectorState.builder()
-                .metadata(ObjectMeta.builder().name("connect1").build())
-                .spec(ChangeConnectorState.ChangeConnectorStateSpec
-                        .builder()
-                        .action(ChangeConnectorState.ConnectorAction.resume)
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder().name("connect1").build())
+            .spec(ChangeConnectorState.ChangeConnectorStateSpec
+                .builder()
+                .action(ChangeConnectorState.ConnectorAction.resume)
+                .build())
+            .build();
 
         StepVerifier.create(connectorController.changeState("test", "connect1", changeConnectorState))
             .consumeNextWith(response -> {
diff --git a/src/test/java/com/michelin/ns4kafka/controllers/ConsumerGroupControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/ConsumerGroupControllerTest.java
index cfb1d046..88f5d154 100644
--- a/src/test/java/com/michelin/ns4kafka/controllers/ConsumerGroupControllerTest.java
+++ b/src/test/java/com/michelin/ns4kafka/controllers/ConsumerGroupControllerTest.java
@@ -1,11 +1,25 @@
 package com.michelin.ns4kafka.controllers;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertLinesMatch;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyMap;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.notNull;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
 import com.michelin.ns4kafka.models.AuditLog;
+import com.michelin.ns4kafka.models.Namespace;
+import com.michelin.ns4kafka.models.ObjectMeta;
 import com.michelin.ns4kafka.models.consumer.group.ConsumerGroupResetOffsets;
 import com.michelin.ns4kafka.models.consumer.group.ConsumerGroupResetOffsets.ConsumerGroupResetOffsetsSpec;
 import com.michelin.ns4kafka.models.consumer.group.ConsumerGroupResetOffsets.ResetOffsetsMethod;
-import com.michelin.ns4kafka.models.Namespace;
-import com.michelin.ns4kafka.models.ObjectMeta;
 import com.michelin.ns4kafka.models.consumer.group.ConsumerGroupResetOffsetsResponse;
 import com.michelin.ns4kafka.security.ResourceBasedSecurityRule;
 import com.michelin.ns4kafka.services.ConsumerGroupService;
@@ -13,6 +27,11 @@
 import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException;
 import io.micronaut.context.event.ApplicationEventPublisher;
 import io.micronaut.security.utils.SecurityService;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
 import org.apache.kafka.common.TopicPartition;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
@@ -21,16 +40,6 @@
 import org.mockito.Mock;
 import org.mockito.junit.jupiter.MockitoExtension;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.ExecutionException;
-
-import static org.junit.jupiter.api.Assertions.*;
-import static org.mockito.ArgumentMatchers.*;
-import static org.mockito.Mockito.*;
-
 @ExtendWith(MockitoExtension.class)
 class ConsumerGroupControllerTest {
     @Mock
@@ -48,53 +57,50 @@ class ConsumerGroupControllerTest {
     @InjectMocks
     ConsumerGroupController consumerGroupController;
 
-    /**
-     * Assert the offsets reset is valid
-     * @throws InterruptedException Interrupted exception thrown
-     * @throws ExecutionException Execution exception thrown
-     */
     @Test
    void resetSuccess() throws InterruptedException, ExecutionException {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ConsumerGroupResetOffsets resetOffset = ConsumerGroupResetOffsets.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("groupID")
-                        .cluster("local")
-                        .build())
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("topic1")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .options(null)
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("groupID")
+                .cluster("local")
+                .build())
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("topic1")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .options(null)
+                .build())
+            .build();
 
         TopicPartition topicPartition1 = new TopicPartition("topic1", 0);
         TopicPartition topicPartition2 = new TopicPartition("topic1", 1);
         List<TopicPartition> topicPartitions = List.of(topicPartition1, topicPartition2);
 
         when(namespaceService.findByName("test"))
-                .thenReturn(Optional.of(ns));
+            .thenReturn(Optional.of(ns));
         when(consumerGroupService.validateResetOffsets(resetOffset))
-                .thenReturn(List.of());
+            .thenReturn(List.of());
         when(consumerGroupService.isNamespaceOwnerOfConsumerGroup("test", "groupID"))
-                .thenReturn(true);
+            .thenReturn(true);
         when(consumerGroupService.getConsumerGroupStatus(ns, "groupID"))
-                .thenReturn("Empty");
+            .thenReturn("Empty");
         when(consumerGroupService.getPartitionsToReset(ns, "groupID", "topic1"))
-                .thenReturn(topicPartitions);
-        when(consumerGroupService.prepareOffsetsToReset(ns, "groupID", null, topicPartitions, ResetOffsetsMethod.TO_EARLIEST))
-                .thenReturn(Map.of(topicPartition1, 5L, topicPartition2, 10L));
+            .thenReturn(topicPartitions);
+        when(consumerGroupService.prepareOffsetsToReset(ns, "groupID", null, topicPartitions,
+            ResetOffsetsMethod.TO_EARLIEST))
+            .thenReturn(Map.of(topicPartition1, 5L, topicPartition2, 10L));
 
         when(securityService.username()).thenReturn(Optional.of("test-user"));
         when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false);
         doNothing().when(applicationEventPublisher).publishEvent(any());
 
-        List<ConsumerGroupResetOffsetsResponse> result = consumerGroupController.resetOffsets("test", "groupID", resetOffset, false);
+        List<ConsumerGroupResetOffsetsResponse> result =
+            consumerGroupController.resetOffsets("test", "groupID", resetOffset, false);
 
         ConsumerGroupResetOffsetsResponse resultTopicPartition1 = result
             .stream()
@@ -106,75 +112,73 @@ void resetSuccess() throws InterruptedException, ExecutionException {
         assertEquals(5L, resultTopicPartition1.getSpec().getOffset());
 
         ConsumerGroupResetOffsetsResponse resultTopicPartition2 = result
-                .stream()
-                .filter(topicPartitionOffset -> topicPartitionOffset.getSpec().getPartition() == 1)
-                .findFirst()
-                .orElse(null);
+            .stream()
+            .filter(topicPartitionOffset -> topicPartitionOffset.getSpec().getPartition() == 1)
+            .findFirst()
+            .orElse(null);
 
         assertNotNull(resultTopicPartition2);
         assertEquals(10L, resultTopicPartition2.getSpec().getOffset());
 
-        verify(consumerGroupService, times(1)).alterConsumerGroupOffsets(ArgumentMatchers.eq(ns), ArgumentMatchers.eq("groupID"), anyMap());
+        verify(consumerGroupService, times(1)).alterConsumerGroupOffsets(ArgumentMatchers.eq(ns),
+            ArgumentMatchers.eq("groupID"), anyMap());
     }
 
-    /**
-     * Assert the offsets reset is valid in dry mode
-     * @throws InterruptedException Interrupted exception thrown
-     * @throws ExecutionException Execution exception thrown
-     */
     @Test
     void resetDryRunSuccess() throws InterruptedException, ExecutionException {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ConsumerGroupResetOffsets resetOffset = ConsumerGroupResetOffsets.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("groupID")
-                        .cluster("local")
-                        .build())
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("topic1")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("groupID")
+                .cluster("local")
+                .build())
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("topic1")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .build())
+            .build();
 
         TopicPartition topicPartition1 = new TopicPartition("topic1", 0);
         TopicPartition topicPartition2 = new TopicPartition("topic1", 1);
         List<TopicPartition> topicPartitions = List.of(topicPartition1, topicPartition2);
 
         when(namespaceService.findByName("test"))
-                .thenReturn(Optional.of(ns));
+            .thenReturn(Optional.of(ns));
         when(consumerGroupService.validateResetOffsets(resetOffset))
-                .thenReturn(List.of());
+            .thenReturn(List.of());
         when(consumerGroupService.isNamespaceOwnerOfConsumerGroup("test", "groupID"))
-                .thenReturn(true);
+            .thenReturn(true);
         when(consumerGroupService.getConsumerGroupStatus(ns, "groupID"))
-                .thenReturn("Empty");
+            .thenReturn("Empty");
         when(consumerGroupService.getPartitionsToReset(ns, "groupID", "topic1"))
-                .thenReturn(topicPartitions);
-        when(consumerGroupService.prepareOffsetsToReset(ns, "groupID", null, topicPartitions, ResetOffsetsMethod.TO_EARLIEST))
-                .thenReturn(Map.of(topicPartition1, 5L, topicPartition2, 10L));
+            .thenReturn(topicPartitions);
+        when(consumerGroupService.prepareOffsetsToReset(ns, "groupID", null, topicPartitions,
+            ResetOffsetsMethod.TO_EARLIEST))
+            .thenReturn(Map.of(topicPartition1, 5L, topicPartition2, 10L));
 
-        List<ConsumerGroupResetOffsetsResponse> result = consumerGroupController.resetOffsets("test", "groupID", resetOffset, true);
+        List<ConsumerGroupResetOffsetsResponse> result =
+            consumerGroupController.resetOffsets("test", "groupID", resetOffset, true);
 
         ConsumerGroupResetOffsetsResponse resultTopicPartition1 = result
-                .stream()
-                .filter(topicPartitionOffset -> topicPartitionOffset.getSpec().getPartition() == 0)
-                .findFirst()
-                .orElse(null);
+            .stream()
+            .filter(topicPartitionOffset -> topicPartitionOffset.getSpec().getPartition() == 0)
+            .findFirst()
+            .orElse(null);
 
         assertNotNull(resultTopicPartition1);
         assertEquals(5L, resultTopicPartition1.getSpec().getOffset());
 
         ConsumerGroupResetOffsetsResponse resultTopicPartition2 = result
-                .stream()
-                .filter(topicPartitionOffset -> topicPartitionOffset.getSpec().getPartition() == 1)
-                .findFirst()
-                .orElse(null);
+            .stream()
+            .filter(topicPartitionOffset -> topicPartitionOffset.getSpec().getPartition() == 1)
+            .findFirst()
+            .orElse(null);
 
         assertNotNull(resultTopicPartition2);
         assertEquals(10L, resultTopicPartition2.getSpec().getOffset());
@@ -182,155 +186,127 @@ void resetDryRunSuccess() throws InterruptedException, ExecutionException {
         verify(consumerGroupService, never()).alterConsumerGroupOffsets(notNull(), anyString(), anyMap());
     }
 
-    /**
-     * Assert an error message is returned when an error occurred in offsets reset
-     * @throws InterruptedException Interrupted exception thrown
-     * @throws ExecutionException Execution exception thrown
-     */
     @Test
     void resetExecutionError() throws InterruptedException, ExecutionException {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ConsumerGroupResetOffsets resetOffset = ConsumerGroupResetOffsets.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("groupID")
-                        .cluster("local")
-                        .build())
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("topic1")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("groupID")
+                .cluster("local")
+                .build())
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("topic1")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .build())
+            .build();
 
         when(namespaceService.findByName("test"))
-                .thenReturn(Optional.of(ns));
+            .thenReturn(Optional.of(ns));
         when(consumerGroupService.validateResetOffsets(resetOffset))
-                .thenReturn(List.of());
+            .thenReturn(List.of());
         when(consumerGroupService.isNamespaceOwnerOfConsumerGroup("test", "groupID"))
-                .thenReturn(true);
+            .thenReturn(true);
         when(consumerGroupService.getConsumerGroupStatus(ns, "groupID"))
-                .thenReturn("Empty");
+            .thenReturn("Empty");
         when(consumerGroupService.getPartitionsToReset(ns, "groupID", "topic1"))
-                .thenThrow(new ExecutionException("Error during getPartitionsToReset", new Throwable()));
+            .thenThrow(new ExecutionException("Error during getPartitionsToReset", new Throwable()));
 
         ExecutionException result = assertThrows(ExecutionException.class,
-                () -> consumerGroupController.resetOffsets("test", "groupID", resetOffset, false));
+            () -> consumerGroupController.resetOffsets("test", "groupID", resetOffset, false));
 
         assertEquals("Error during getPartitionsToReset", result.getMessage());
     }
 
-    /**
-     * Assert an error message is returned when the namespace is not owner of consumer group
-     */
     @Test
     void resetValidationErrorNotOwnerOfConsumerGroup() {
-        Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
-
         ConsumerGroupResetOffsets resetOffset = ConsumerGroupResetOffsets.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("groupID")
-                        .cluster("local")
-                        .build())
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("topic1")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("groupID")
+                .cluster("local")
+                .build())
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("topic1")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .build())
+            .build();
 
-        when(namespaceService.findByName("test"))
-                .thenReturn(Optional.of(ns));
         when(consumerGroupService.validateResetOffsets(resetOffset))
-                .thenReturn(new ArrayList<>());
+            .thenReturn(new ArrayList<>());
         when(consumerGroupService.isNamespaceOwnerOfConsumerGroup("test", "groupID"))
-                .thenReturn(false);
+            .thenReturn(false);
 
         ResourceValidationException result = assertThrows(ResourceValidationException.class,
-                () -> consumerGroupController.resetOffsets("test", "groupID", resetOffset, false));
+            () -> consumerGroupController.resetOffsets("test", "groupID", resetOffset, false));
 
-        assertLinesMatch(List.of("Namespace not owner of this consumer group \"groupID\"."), result.getValidationErrors());
+        assertLinesMatch(List.of("Namespace not owner of this consumer group \"groupID\"."),
+            result.getValidationErrors());
     }
 
-    /**
-     * Assert an error message is returned when the offsets reset options are not valid
-     */
     @Test
     void resetValidationErrorInvalidResource() {
-        Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
-
         ConsumerGroupResetOffsets resetOffset = ConsumerGroupResetOffsets.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("groupID")
-                        .cluster("local")
-                        .build())
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("topic1")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("groupID")
+                .cluster("local")
+                .build())
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("topic1")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .build())
+            .build();
 
-        when(namespaceService.findByName("test"))
-                .thenReturn(Optional.of(ns));
         when(consumerGroupService.validateResetOffsets(resetOffset))
-                .thenReturn(List.of("Validation Error"));
+            .thenReturn(List.of("Validation Error"));
         when(consumerGroupService.isNamespaceOwnerOfConsumerGroup("test", "groupID"))
-                .thenReturn(true);
+            .thenReturn(true);
 
         ResourceValidationException result = assertThrows(ResourceValidationException.class,
-                () -> consumerGroupController.resetOffsets("test", "groupID", resetOffset, false));
+            () -> consumerGroupController.resetOffsets("test", "groupID", resetOffset, false));
 
         assertLinesMatch(List.of("Validation Error"), result.getValidationErrors());
     }
 
-    /**
-     * Assert an error message is returned when the consumer group is active
-     */
     @Test
     void resetValidationErrorConsumerGroupActive() throws ExecutionException, InterruptedException {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ConsumerGroupResetOffsets resetOffset = ConsumerGroupResetOffsets.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("groupID")
-                        .cluster("local")
-                        .build())
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("topic1")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("groupID")
+                .cluster("local")
+                .build())
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("topic1")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .build())
+            .build();
 
         when(namespaceService.findByName("test"))
-                .thenReturn(Optional.of(ns));
+            .thenReturn(Optional.of(ns));
         when(consumerGroupService.validateResetOffsets(resetOffset))
-                .thenReturn(new ArrayList<>());
+            .thenReturn(new ArrayList<>());
         when(consumerGroupService.isNamespaceOwnerOfConsumerGroup("test", "groupID"))
-                .thenReturn(true);
+            .thenReturn(true);
         when(consumerGroupService.getConsumerGroupStatus(ns, "groupID"))
-                .thenReturn("Active");
+            .thenReturn("Active");
 
         IllegalStateException result = assertThrows(IllegalStateException.class,
-                () -> consumerGroupController.resetOffsets("test", "groupID", resetOffset, false));
+            () -> consumerGroupController.resetOffsets("test", "groupID", resetOffset, false));
 
-        assertEquals("Assignments can only be reset if the consumer group \"groupID\" is inactive, but the current state is active.", result.getMessage());
+        assertEquals(
+            "Assignments can only be reset if the consumer group \"groupID\" is inactive, "
+                + "but the current state is active.",
+            result.getMessage());
     }
 }
diff --git a/src/test/java/com/michelin/ns4kafka/controllers/ExceptionHandlerControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/ExceptionHandlerControllerTest.java
index 445a6d5d..b4987c0d 100644
--- a/src/test/java/com/michelin/ns4kafka/controllers/ExceptionHandlerControllerTest.java
+++ b/src/test/java/com/michelin/ns4kafka/controllers/ExceptionHandlerControllerTest.java
@@ -1,5 +1,7 @@
 package com.michelin.ns4kafka.controllers;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import com.michelin.ns4kafka.utils.exceptions.ResourceValidationException;
 import io.micronaut.http.HttpMethod;
 import io.micronaut.http.HttpRequest;
@@ -7,23 +9,19 @@
 import io.micronaut.security.authentication.Authentication;
 import io.micronaut.security.authentication.AuthenticationException;
 import io.micronaut.security.authentication.AuthorizationException;
-import org.junit.jupiter.api.Test;
-
 import jakarta.validation.ConstraintViolationException;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
+import org.junit.jupiter.api.Test;
 
 class ExceptionHandlerControllerTest {
-
     ExceptionHandlerController exceptionHandlerController = new ExceptionHandlerController();
 
     @Test
     void resourceValidationError() {
         var response = exceptionHandlerController.error(HttpRequest.create(HttpMethod.POST, "local"),
-                new ResourceValidationException(List.of("Error1", "Error2"),"Topic", "Name"));
+            new ResourceValidationException(List.of("Error1", "Error2"), "Topic", "Name"));
 
         var status = response.body();
         assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, response.getStatus());
@@ -38,7 +36,7 @@ void resourceValidationError() {
     @Test
     void constraintViolationError() {
         var response = exceptionHandlerController.error(HttpRequest.create(HttpMethod.POST, "local"),
-                new ConstraintViolationException(Set.of()));
+            new ConstraintViolationException(Set.of()));
 
         var status = response.body();
         assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, response.getStatus());
@@ -48,7 +46,7 @@ void constraintViolationError() {
     @Test
     void authorizationUnauthorizedError() {
         var response = exceptionHandlerController.error(HttpRequest.create(HttpMethod.POST, "local"),
-                new AuthorizationException(null));
+            new AuthorizationException(null));
 
         var status = response.body();
         assertEquals(HttpStatus.UNAUTHORIZED, response.getStatus());
@@ -58,7 +56,7 @@ void authorizationUnauthorizedError() {
     @Test
     void authorizationForbiddenError() {
         var response = exceptionHandlerController.error(HttpRequest.create(HttpMethod.POST, "local"),
-                new AuthorizationException(Authentication.build("user", Map.of())));
+            new AuthorizationException(Authentication.build("user", Map.of())));
 
         var status = response.body();
         assertEquals(HttpStatus.FORBIDDEN, response.getStatus());
@@ -68,7 +66,7 @@ void authorizationForbiddenError() {
     @Test
     void authenticationError() {
         var response = exceptionHandlerController.error(HttpRequest.create(HttpMethod.POST, "local"),
-                new AuthenticationException());
+            new AuthenticationException());
 
         var status = response.body();
         assertEquals(HttpStatus.UNAUTHORIZED, response.getStatus());
@@ -78,7 +76,7 @@ void authenticationError() {
     @Test
     void anyError() {
         var response = exceptionHandlerController.error(HttpRequest.create(HttpMethod.POST, "local"),
-                new Exception());
+            new Exception());
 
         var status = response.body();
         assertEquals(HttpStatus.INTERNAL_SERVER_ERROR, response.getStatus());
diff --git a/src/test/java/com/michelin/ns4kafka/controllers/NamespaceControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/NamespaceControllerTest.java
index ce2e3e9c..ac765e20 100644
--- a/src/test/java/com/michelin/ns4kafka/controllers/NamespaceControllerTest.java
+++ b/src/test/java/com/michelin/ns4kafka/controllers/NamespaceControllerTest.java
@@ -1,5 +1,13 @@
 package com.michelin.ns4kafka.controllers;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
 import com.michelin.ns4kafka.models.AuditLog;
 import com.michelin.ns4kafka.models.Namespace;
 import com.michelin.ns4kafka.models.ObjectMeta;
@@ -9,6 +17,9 @@
 import io.micronaut.context.event.ApplicationEventPublisher;
 import io.micronaut.http.HttpResponse;
 import io.micronaut.security.utils.SecurityService;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
@@ -17,15 +28,6 @@
 import org.mockito.Mock;
 import org.mockito.junit.jupiter.MockitoExtension;
 
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.*;
-
 @ExtendWith(MockitoExtension.class)
 class NamespaceControllerTest {
     @Mock
@@ -41,39 +43,40 @@
     NamespaceController namespaceController;
 
     @Test
-    void applyCreateInvalid(){
+    void applyCreateInvalid() {
         Namespace toCreate = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("new-namespace")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("new-namespace")
+                .cluster("local")
+                .build())
+            .build();
         when(namespaceService.findByName("new-namespace"))
-                .thenReturn(Optional.empty());
+            .thenReturn(Optional.empty());
         when(namespaceService.validateCreation(toCreate))
-                .thenReturn(List.of("OneError"));
+            .thenReturn(List.of("OneError"));
 
-        ResourceValidationException actual = assertThrows(ResourceValidationException.class,() -> namespaceController.apply(toCreate, false));
+        ResourceValidationException actual =
+            assertThrows(ResourceValidationException.class, () -> namespaceController.apply(toCreate, false));
         assertEquals(1, actual.getValidationErrors().size());
     }
 
     @Test
-    void applyCreateSuccess(){
+    void applyCreateSuccess() {
         Namespace toCreate = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("new-namespace")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("new-namespace")
+                .cluster("local")
+                .build())
+            .build();
         when(namespaceService.findByName("new-namespace"))
-                .thenReturn(Optional.empty());
+            .thenReturn(Optional.empty());
         when(namespaceService.validateCreation(toCreate))
-                .thenReturn(List.of());
+            .thenReturn(List.of());
         when(securityService.username()).thenReturn(Optional.of("test-user"));
         when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false);
         doNothing().when(applicationEventPublisher).publishEvent(any());
         when(namespaceService.createOrUpdate(toCreate))
-                .thenReturn(toCreate);
+            .thenReturn(toCreate);
 
         var response = namespaceController.apply(toCreate, false);
         Namespace actual = response.body();
@@ -83,17 +86,17 @@ void applyCreateSuccess(){
     }
 
     @Test
-    void applyCreateDryRun(){
+    void applyCreateDryRun() {
         Namespace toCreate = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("new-namespace")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("new-namespace")
+                .cluster("local")
+                .build())
+            .build();
         when(namespaceService.findByName("new-namespace"))
-                .thenReturn(Optional.empty());
+            .thenReturn(Optional.empty());
         when(namespaceService.validateCreation(toCreate))
-                .thenReturn(List.of());
+            .thenReturn(List.of());
 
         var response = namespaceController.apply(toCreate, true);
         assertEquals("created", response.header("X-Ns4kafka-Result"));
@@ -101,65 +104,65 @@
     }
 
     @Test
-    void applyUpdateInvalid(){
+    void applyUpdateInvalid() {
         Namespace existing = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user")
+                .build())
+            .build();
         Namespace toUpdate = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local-change")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user-change")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local-change")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user-change")
+                .build())
+            .build();
 
         when(namespaceService.findByName("namespace"))
-                .thenReturn(Optional.of(existing));
+            .thenReturn(Optional.of(existing));
 
         ResourceValidationException actual = assertThrows(ResourceValidationException.class,
-                () -> namespaceController.apply(toUpdate, false));
+            () -> namespaceController.apply(toUpdate, false));
         assertEquals(2, actual.getValidationErrors().size());
         Assertions.assertIterableEquals(
-                List.of("Invalid value local-change for cluster: Value is immutable (local)",
-                        "Invalid value user-change for kafkaUser: Value is immutable (user)"),
-                actual.getValidationErrors());
+            List.of("Invalid value local-change for cluster: Value is immutable (local)",
+                "Invalid value user-change for kafkaUser: Value is immutable (user)"),
+            actual.getValidationErrors());
     }
 
     @Test
-    void applyUpdateSuccess(){
+    void applyUpdateSuccess() {
         Namespace existing = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user")
+                .build())
+            .build();
         Namespace toUpdate = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .labels(Map.of("new", "label"))
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .labels(Map.of("new", "label"))
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user")
+                .build())
+            .build();
 
         when(namespaceService.findByName("namespace"))
-                .thenReturn(Optional.of(existing));
+            .thenReturn(Optional.of(existing));
         when(securityService.username()).thenReturn(Optional.of("test-user"));
         when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false);
         doNothing().when(applicationEventPublisher).publishEvent(any());
         when(namespaceService.createOrUpdate(toUpdate))
-                .thenReturn(toUpdate);
+            .thenReturn(toUpdate);
 
         var response = namespaceController.apply(toUpdate, false);
         Namespace actual = response.body();
@@ -173,59 +176,59 @@
     }
 
     @Test
-    void applyUpdateSuccess_AlreadyExists(){
+    void applyUpdateSuccess_AlreadyExists() {
         Namespace existing = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .namespace("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .namespace("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user")
+                .build())
+            .build();
         Namespace toUpdate = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user")
+                .build())
+            .build();
 
         when(namespaceService.findByName("namespace"))
-                .thenReturn(Optional.of(existing));
+            .thenReturn(Optional.of(existing));
 
         var response = namespaceController.apply(toUpdate, false);
         Namespace actual = response.body();
         assertEquals("unchanged", response.header("X-Ns4kafka-Result"));
         assertEquals(existing, actual);
-        verify(namespaceService,never()).createOrUpdate(ArgumentMatchers.any());
+        verify(namespaceService, never()).createOrUpdate(ArgumentMatchers.any());
     }
 
     @Test
-    void applyUpdateDryRun(){
+    void applyUpdateDryRun() {
         Namespace existing = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user")
+                .build())
+            .build();
         Namespace toUpdate = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .labels(Map.of("new", "label"))
"label")) - .build()) - .spec(Namespace.NamespaceSpec.builder() - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .labels(Map.of("new", "label")) + .build()) + .spec(Namespace.NamespaceSpec.builder() + .kafkaUser("user") + .build()) + .build(); when(namespaceService.findByName("namespace")) - .thenReturn(Optional.of(existing)); + .thenReturn(Optional.of(existing)); var response = namespaceController.apply(toUpdate, true); assertEquals("changed", response.header("X-Ns4kafka-Result")); @@ -235,18 +238,18 @@ void applyUpdateDryRun(){ @Test void deleteSuccess() { Namespace existing = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .kafkaUser("user") + .build()) + .build(); when(namespaceService.findByName("namespace")) - .thenReturn(Optional.of(existing)); + .thenReturn(Optional.of(existing)); when(namespaceService.listAllNamespaceResources(existing)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); @@ -258,19 +261,19 @@ void deleteSuccess() { @Test void deleteSuccessDryRun() { Namespace existing = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .kafkaUser("user") + .build()) + .build(); when(namespaceService.findByName("namespace")) - .thenReturn(Optional.of(existing)); + .thenReturn(Optional.of(existing)); when(namespaceService.listAllNamespaceResources(existing)) - .thenReturn(List.of()); + .thenReturn(List.of()); var result = namespaceController.delete("namespace", true); @@ -282,7 +285,7 @@ void deleteSuccessDryRun() { @Test void deleteFailNoNamespace() { when(namespaceService.findByName("namespace")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); var result = namespaceController.delete("namespace", false); verify(namespaceService, never()).delete(any()); assertEquals(HttpResponse.notFound().getStatus(), result.getStatus()); @@ -292,19 +295,19 @@ void deleteFailNoNamespace() { @Test void deleteFailNamespaceNotEmpty() { Namespace existing = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .kafkaUser("user") + .build()) + .build(); when(namespaceService.findByName("namespace")) - .thenReturn(Optional.of(existing)); + .thenReturn(Optional.of(existing)); when(namespaceService.listAllNamespaceResources(existing)) - .thenReturn(List.of("Topic/topic1")); - assertThrows(ResourceValidationException.class,() -> namespaceController.delete("namespace", false)); + .thenReturn(List.of("Topic/topic1")); + assertThrows(ResourceValidationException.class, () -> 
         verify(namespaceService, never()).delete(any());
     }
 }
diff --git a/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaControllerTest.java
index 15268bfe..efbb6193 100644
--- a/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaControllerTest.java
+++ b/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaControllerTest.java
@@ -1,5 +1,16 @@
 package com.michelin.ns4kafka.controllers;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertLinesMatch;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
 import com.michelin.ns4kafka.controllers.quota.ResourceQuotaController;
 import com.michelin.ns4kafka.models.AuditLog;
 import com.michelin.ns4kafka.models.Namespace;
@@ -14,6 +25,9 @@
 import io.micronaut.http.HttpResponse;
 import io.micronaut.http.HttpStatus;
 import io.micronaut.security.utils.SecurityService;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.mockito.ArgumentMatchers;
@@ -21,13 +35,6 @@
 import org.mockito.Mock;
 import org.mockito.junit.jupiter.MockitoExtension;
 
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-
-import static org.junit.jupiter.api.Assertions.*;
-import static org.mockito.Mockito.*;
-
 @ExtendWith(MockitoExtension.class)
 class ResourceQuotaControllerTest {
     @InjectMocks
@@ -45,25 +52,22 @@ class ResourceQuotaControllerTest {
     @Mock
     ApplicationEventPublisher applicationEventPublisher;
 
-    /**
-     * Validate quota listing
-     */
     @Test
     void list() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ResourceQuotaResponse response = ResourceQuotaResponse.builder()
-                .spec(ResourceQuotaResponse.ResourceQuotaResponseSpec.builder()
-                        .countTopic("0/INF")
-                        .countPartition("0/INF")
-                        .countConnector("0/INF")
-                        .build())
-                .build();
+            .spec(ResourceQuotaResponse.ResourceQuotaResponseSpec.builder()
+                .countTopic("0/INF")
+                .countPartition("0/INF")
+                .countConnector("0/INF")
+                .build())
+            .build();
 
         when(namespaceService.findByName("test")).thenReturn(Optional.of(ns));
         when(resourceQuotaService.findByNamespace(ns.getMetadata().getName())).thenReturn(Optional.empty());
@@ -74,17 +78,14 @@
         assertEquals(response, actual.get(0));
     }
 
-    /**
-     * Validate quota get is empty
-     */
     @Test
     void getEmpty() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         when(namespaceService.findByName("test")).thenReturn(Optional.of(ns));
         when(resourceQuotaService.findByName(ns.getMetadata().getName(), "quotaName")).thenReturn(Optional.empty());
@@ -93,91 +94,85 @@
         assertTrue(actual.isEmpty());
     }
 
-    /**
-     * Validate quota get present
-     */
     @Test
     void getPresent() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ResourceQuota resourceQuota = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("test")
-                        .build())
-                .spec(Map.of("count/topics", "1"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("test")
+                .build())
+            .spec(Map.of("count/topics", "1"))
+            .build();
 
         ResourceQuotaResponse response = ResourceQuotaResponse.builder()
-                .spec(ResourceQuotaResponse.ResourceQuotaResponseSpec.builder()
-                        .countTopic("0/INF")
-                        .build())
-                .build();
+            .spec(ResourceQuotaResponse.ResourceQuotaResponseSpec.builder()
+                .countTopic("0/INF")
+                .build())
+            .build();
 
         when(namespaceService.findByName("test")).thenReturn(Optional.of(ns));
-        when(resourceQuotaService.findByName(ns.getMetadata().getName(), "quotaName")).thenReturn(Optional.of(resourceQuota));
-        when(resourceQuotaService.getUsedResourcesByQuotaByNamespace(ns, Optional.of(resourceQuota))).thenReturn(response);
+        when(resourceQuotaService.findByName(ns.getMetadata().getName(), "quotaName")).thenReturn(
+            Optional.of(resourceQuota));
+        when(resourceQuotaService.getUsedResourcesByQuotaByNamespace(ns, Optional.of(resourceQuota))).thenReturn(
+            response);
 
         Optional<ResourceQuotaResponse> actual = resourceQuotaController.get("test", "quotaName");
         assertTrue(actual.isPresent());
         assertEquals(response, actual.get());
     }
 
-    /**
-     * Validate quota apply when there are validation errors
-     */
     @Test
     void applyValidationErrors() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ResourceQuota resourceQuota = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("test")
-                        .build())
-                .spec(Map.of("count/topics", "1"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("test")
+                .build())
+            .spec(Map.of("count/topics", "1"))
+            .build();
 
         when(namespaceService.findByName("test")).thenReturn(Optional.of(ns));
-        when(resourceQuotaService.validateNewResourceQuota(ns, resourceQuota)).thenReturn(List.of("Quota already exceeded"));
+        when(resourceQuotaService.validateNewResourceQuota(ns, resourceQuota)).thenReturn(
+            List.of("Quota already exceeded"));
 
         ResourceValidationException actual = assertThrows(ResourceValidationException.class,
-                () -> resourceQuotaController.apply("test", resourceQuota, false));
+            () -> resourceQuotaController.apply("test", resourceQuota, false));
 
         assertEquals(1, actual.getValidationErrors().size());
         assertLinesMatch(List.of("Quota already exceeded"), actual.getValidationErrors());
         verify(resourceQuotaService, never()).create(ArgumentMatchers.any());
     }
 
-    /**
-     * Validate quota apply when quota is unchanged
-     */
     @Test
     void applyUnchanged() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ResourceQuota resourceQuota = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("test")
-                        .build())
-                .spec(Map.of("count/topics", "1"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("test")
+                .build())
+            .spec(Map.of("count/topics", "1"))
+            .build();
 
         when(namespaceService.findByName("test")).thenReturn(Optional.of(ns));
         when(resourceQuotaService.validateNewResourceQuota(ns, resourceQuota)).thenReturn(List.of());
@@ -189,25 +184,22 @@
         assertEquals(resourceQuota, response.body());
     }
 
-    /**
-     * Validate quota apply in dry mode
-     */
     @Test
     void applyDryRun() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ResourceQuota resourceQuota = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("test")
-                        .build())
-                .spec(Map.of("count/topics", "1"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("test")
+                .build())
+            .spec(Map.of("count/topics", "1"))
+            .build();
 
         when(namespaceService.findByName("test")).thenReturn(Optional.of(ns));
         when(resourceQuotaService.validateNewResourceQuota(ns, resourceQuota)).thenReturn(List.of());
@@ -218,25 +210,22 @@
         verify(resourceQuotaService, never()).create(ArgumentMatchers.any());
     }
 
-    /**
-     * Validate quota apply when quota is created
-     */
     @Test
     void applyCreated() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ResourceQuota resourceQuota = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("created-quota")
-                        .build())
-                .spec(Map.of("count/topics", "1"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("created-quota")
+                .build())
+            .spec(Map.of("count/topics", "1"))
+            .build();
 
         when(namespaceService.findByName("test")).thenReturn(Optional.of(ns));
         when(resourceQuotaService.validateNewResourceQuota(ns, resourceQuota)).thenReturn(List.of());
@@ -252,37 +241,35 @@
         assertEquals("created-quota", actual.getMetadata().getName());
     }
 
-    /**
-     * Validate quota apply when quota is updated
-     */
     @Test
     void applyUpdated() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
 
         ResourceQuota resourceQuotaExisting = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("created-quota")
-                        .build())
-                .spec(Map.of("count/topics", "3"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("created-quota")
+                .build())
+            .spec(Map.of("count/topics", "3"))
+            .build();
 
         ResourceQuota resourceQuota = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("created-quota")
-                        .build())
-                .spec(Map.of("count/topics", "1"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("created-quota")
+                .build())
+            .spec(Map.of("count/topics", "1"))
+            .build();
 
         when(namespaceService.findByName("test")).thenReturn(Optional.of(ns));
         when(resourceQuotaService.validateNewResourceQuota(ns, resourceQuota)).thenReturn(List.of());
-        when(resourceQuotaService.findByNamespace(ns.getMetadata().getName())).thenReturn(Optional.of(resourceQuotaExisting));
+        when(resourceQuotaService.findByNamespace(ns.getMetadata().getName())).thenReturn(
+            Optional.of(resourceQuotaExisting));
         when(securityService.username()).thenReturn(Optional.of("test-user"));
         when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false);
         doNothing().when(applicationEventPublisher).publishEvent(any());
@@ -295,9 +282,6 @@ void applyUpdated() {
         assertEquals("1", actual.getSpec().get("count/topics"));
     }
 
-    /**
-     * Validate resource quota deletion when quota is not found
-     */
     @Test
     void deleteNotFound() {
         when(resourceQuotaService.findByName("test", "quota")).thenReturn(Optional.empty());
@@ -306,18 +290,15 @@
         verify(resourceQuotaService, never()).delete(ArgumentMatchers.any());
     }
 
-    /**
-     * Validate resource quota deletion in dry run mode
-     */
     @Test
     void deleteDryRun() {
         ResourceQuota resourceQuota = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("created-quota")
-                        .build())
-                .spec(Map.of("count/topics", "3"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("created-quota")
+                .build())
+            .spec(Map.of("count/topics", "3"))
+            .build();
 
         when(resourceQuotaService.findByName("test", "quota")).thenReturn(Optional.of(resourceQuota));
 
         HttpResponse<Void> actual = resourceQuotaController.delete("test", "quota", true);
@@ -325,18 +306,15 @@
         verify(resourceQuotaService, never()).delete(ArgumentMatchers.any());
     }
 
-    /**
-     * Validate resource quota deletion
-     */
     @Test
     void delete() {
         ResourceQuota resourceQuota = ResourceQuota.builder()
-                .metadata(ObjectMeta.builder()
-                        .cluster("local")
-                        .name("created-quota")
-                        .build())
-                .spec(Map.of("count/topics", "3"))
-                .build();
+            .metadata(ObjectMeta.builder()
+                .cluster("local")
+                .name("created-quota")
+                .build())
+            .spec(Map.of("count/topics", "3"))
+            .build();
 
         when(resourceQuotaService.findByName("test", "quota")).thenReturn(Optional.of(resourceQuota));
         when(securityService.username()).thenReturn(Optional.of("test-user"));
diff --git a/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaNonNamespacedControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaNonNamespacedControllerTest.java
index 5bca7020..74daadd9 100644
--- a/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaNonNamespacedControllerTest.java
+++ b/src/test/java/com/michelin/ns4kafka/controllers/ResourceQuotaNonNamespacedControllerTest.java
@@ -1,19 +1,22 @@
 package com.michelin.ns4kafka.controllers;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
 import com.michelin.ns4kafka.controllers.quota.ResourceQuotaNonNamespacedController;
+import com.michelin.ns4kafka.models.Namespace;
+import com.michelin.ns4kafka.models.ObjectMeta;
 import com.michelin.ns4kafka.models.quota.ResourceQuotaResponse;
+import com.michelin.ns4kafka.services.NamespaceService;
 import com.michelin.ns4kafka.services.ResourceQuotaService;
+import java.util.List;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.mockito.InjectMocks;
 import org.mockito.Mock;
 import org.mockito.junit.jupiter.MockitoExtension;
 
-import java.util.List;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.Mockito.when;
-
 @ExtendWith(MockitoExtension.class)
 class ResourceQuotaNonNamespacedControllerTest {
     @InjectMocks
@@ -22,20 +25,31 @@ resourceQuotaService;
-    /**
-     * Validate quota listing
-     */
+    @Mock
+    NamespaceService namespaceService;
+
     @Test
     void listAll() {
+        Namespace namespace = Namespace.builder()
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
+
         ResourceQuotaResponse response = ResourceQuotaResponse.builder()
-                .spec(ResourceQuotaResponse.ResourceQuotaResponseSpec.builder()
-                        .countTopic("2/5")
-                        .countPartition("2/10")
-                        .countConnector("5/5")
-                        .build())
-                .build();
-
-        when(resourceQuotaService.getUsedResourcesByQuotaForAllNamespaces()).thenReturn(List.of(response));
+            .spec(ResourceQuotaResponse.ResourceQuotaResponseSpec.builder()
+                .countTopic("2/5")
+                .countPartition("2/10")
+                .countConnector("5/5")
+                .build())
+            .build();
+
+        when(namespaceService.listAll()).thenReturn(List.of(namespace));
+        when(resourceQuotaService.getUsedQuotaByNamespaces(any())).thenReturn(List.of(response));
 
         List<ResourceQuotaResponse> actual = resourceQuotaController.listAll();
         assertEquals(1, actual.size());
diff --git a/src/test/java/com/michelin/ns4kafka/controllers/RoleBindingControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/RoleBindingControllerTest.java
index 2d90dac6..45a71c62 100644
--- a/src/test/java/com/michelin/ns4kafka/controllers/RoleBindingControllerTest.java
+++ b/src/test/java/com/michelin/ns4kafka/controllers/RoleBindingControllerTest.java
@@ -1,5 +1,13 @@
 package com.michelin.ns4kafka.controllers;
 
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
 import com.michelin.ns4kafka.models.AuditLog;
 import com.michelin.ns4kafka.models.Namespace;
 import com.michelin.ns4kafka.models.ObjectMeta;
@@ -9,6 +17,8 @@
 import com.michelin.ns4kafka.services.RoleBindingService;
 import io.micronaut.context.event.ApplicationEventPublisher;
 import io.micronaut.security.utils.SecurityService;
+import java.util.Map;
+import java.util.Optional;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.mockito.ArgumentMatchers;
@@ -16,14 +26,6 @@
 import org.mockito.Mock;
 import org.mockito.junit.jupiter.MockitoExtension;
 
-import java.util.Map;
-import java.util.Optional;
-
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.*;
-
 @ExtendWith(MockitoExtension.class)
 class RoleBindingControllerTest {
     @Mock
@@ -44,16 +46,16 @@ class RoleBindingControllerTest {
     @Test
     void applySuccess() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
         RoleBinding rolebinding = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test.rolebinding")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test.rolebinding")
+                .build())
+            .build();
 
         when(namespaceService.findByName(any())).thenReturn(Optional.of(ns));
         when(securityService.username()).thenReturn(Optional.of("test-user"));
@@ -69,51 +71,51 @@ void applySuccess() {
     @Test
     void applySuccess_AlreadyExists() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
         RoleBinding rolebinding = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test.rolebinding")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test.rolebinding")
+                .build())
+            .build();
 
         when(namespaceService.findByName(any())).thenReturn(Optional.of(ns));
-        when(roleBindingService.findByName("test","test.rolebinding"))
-                .thenReturn(Optional.of(rolebinding));
+        when(roleBindingService.findByName("test", "test.rolebinding"))
+            .thenReturn(Optional.of(rolebinding));
 
         var response = roleBindingController.apply("test", rolebinding, false);
         RoleBinding actual = response.body();
         assertEquals("unchanged", response.header("X-Ns4kafka-Result"));
         assertEquals(actual.getMetadata().getName(), rolebinding.getMetadata().getName());
-        verify(roleBindingService,never()).create(ArgumentMatchers.any());
+        verify(roleBindingService, never()).create(ArgumentMatchers.any());
     }
 
     @Test
     void applySuccess_Changed() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
         RoleBinding rolebinding = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test.rolebinding")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test.rolebinding")
+                .build())
+            .build();
         RoleBinding rolebindingOld = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test.rolebinding")
-                        .labels(Map.of("old", "label"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test.rolebinding")
+                .labels(Map.of("old", "label"))
+                .build())
+            .build();
 
         when(namespaceService.findByName(any())).thenReturn(Optional.of(ns));
-        when(roleBindingService.findByName("test","test.rolebinding"))
-                .thenReturn(Optional.of(rolebindingOld));
+        when(roleBindingService.findByName("test", "test.rolebinding"))
+            .thenReturn(Optional.of(rolebindingOld));
         when(securityService.username()).thenReturn(Optional.of("test-user"));
         when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false);
         doNothing().when(applicationEventPublisher).publishEvent(any());
@@ -127,16 +129,16 @@ void applySuccess_Changed() {
     @Test
     void createDryRun() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
         RoleBinding rolebinding = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test.rolebinding")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test.rolebinding")
+                .build())
+            .build();
 
         when(namespaceService.findByName(any())).thenReturn(Optional.of(ns));
@@ -150,16 +152,16 @@
     void deleteSucess() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test")
-                        .cluster("local")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test")
+                .cluster("local")
+                .build())
+            .build();
         RoleBinding rolebinding = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test.rolebinding")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test.rolebinding")
+                .build())
+            .build();
 
         //when(namespaceService.findByName(any())).thenReturn(Optional.of(ns));
         when(roleBindingService.findByName(any(), any())).thenReturn(Optional.of(rolebinding));
@@ -168,17 +170,17 @@ void deleteSucess() {
         doNothing().when(applicationEventPublisher).publishEvent(any());
 
         assertDoesNotThrow(
-                () -> roleBindingController.delete("test", "test.rolebinding", false)
+            () -> roleBindingController.delete("test", "test.rolebinding", false)
         );
     }
 
     @Test
     void deleteSuccessDryRun() {
         RoleBinding rolebinding = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("test.rolebinding")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("test.rolebinding")
+                .build())
+            .build();
 
         when(roleBindingService.findByName(any(), any())).thenReturn(Optional.of(rolebinding));
diff --git a/src/test/java/com/michelin/ns4kafka/controllers/SchemaControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/SchemaControllerTest.java
index 7ff8ae3e..7f7ade4e 100644
--- a/src/test/java/com/michelin/ns4kafka/controllers/SchemaControllerTest.java
+++ b/src/test/java/com/michelin/ns4kafka/controllers/SchemaControllerTest.java
@@ -1,5 +1,14 @@
 package com.michelin.ns4kafka.controllers;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
 import com.michelin.ns4kafka.models.AuditLog;
 import com.michelin.ns4kafka.models.Namespace;
 import com.michelin.ns4kafka.models.ObjectMeta;
@@ -13,6 +22,8 @@
 import io.micronaut.context.event.ApplicationEventPublisher;
 import io.micronaut.http.HttpStatus;
 import io.micronaut.security.utils.SecurityService;
+import java.util.List;
+import java.util.Optional;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.mockito.InjectMocks;
@@ -22,13 +33,6 @@
 import reactor.core.publisher.Mono;
 import reactor.test.StepVerifier;
 
-import java.util.List;
-import java.util.Optional;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.Mockito.*;
-
 @ExtendWith(MockitoExtension.class)
 class SchemaControllerTest {
     @Mock
@@ -46,10 +50,6 @@ class SchemaControllerTest {
     @Mock
     ApplicationEventPublisher applicationEventPublisher;
 
-    /**
-     * Test the schema creation
-     * The response should contain a "created" header
-     */
     @Test
     void applyCreated() {
         Namespace namespace = buildNamespace();
@@ -73,10 +73,6 @@ void applyCreated() {
             .verifyComplete();
     }
 
-    /**
-     * Test the schema creation
-     * The response should contain a "changed" header
-     */
     @Test
     void applyChanged() {
         Namespace namespace = buildNamespace();
@@ -86,7 +82,7 @@ void applyChanged() {
         when(schemaService.isNamespaceOwnerOfSubject(namespace, schema.getMetadata().getName())).thenReturn(true);
         when(schemaService.validateSchemaCompatibility("local", schema)).thenReturn(Mono.just(List.of()));
         when(schemaService.getLatestSubject(namespace, schema.getMetadata().getName()))
-                .thenReturn(Mono.just(schema));
+            .thenReturn(Mono.just(schema));
         when(schemaService.register(namespace, schema)).thenReturn(Mono.just(2));
         when(securityService.username()).thenReturn(Optional.of("test-user"));
         when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false);
@@ -101,10 +97,6 @@ void applyChanged() {
             .verifyComplete();
     }
 
-    /**
-     * Test the schema creation
-     * The response should contain an "unchanged" header
-     */
     @Test
     void applyUnchanged() {
         Namespace namespace = buildNamespace();
namespace = buildNamespace(); @@ -125,9 +117,6 @@ void applyUnchanged() { .verifyComplete(); } - /** - * Test the schema creation when the subject has wrong format - */ @Test void applyWrongSubjectName() { Namespace namespace = buildNamespace(); @@ -140,17 +129,14 @@ void applyWrongSubjectName() { .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Invalid value wrongSubjectName for name: subject must end with -key or -value", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Invalid value wrongSubjectName for name: subject must end with -key or -value", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); verify(schemaService, never()).register(namespace, schema); } - - /** - * Test the schema creation when the subject does not belong to the namespace - */ @Test void applyNamespaceNotOwnerOfSubject() { Namespace namespace = buildNamespace(); @@ -163,7 +149,8 @@ void applyNamespaceNotOwnerOfSubject() { .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Namespace not owner of this schema prefix.subject-value.", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Namespace not owner of this schema prefix.subject-value.", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); } @@ -210,9 +197,6 @@ void applyDryRunChanged() { verify(schemaService, never()).register(namespace, schema); } - /** - * Test the schema creation in dry mode when the schema is not compatible - */ @Test void applyDryRunNotCompatible() { Namespace namespace = buildNamespace(); @@ -220,7 +204,8 @@ void applyDryRunNotCompatible() { when(namespaceService.findByName("myNamespace")).thenReturn(Optional.of(namespace)); when(schemaService.isNamespaceOwnerOfSubject(namespace, schema.getMetadata().getName())).thenReturn(true); - when(schemaService.validateSchemaCompatibility("local", schema)).thenReturn(Mono.just(List.of("Not compatible"))); + when(schemaService.validateSchemaCompatibility("local", schema)).thenReturn( + Mono.just(List.of("Not compatible"))); StepVerifier.create(schemaController.apply("myNamespace", schema, true)) .consumeErrorWith(error -> { @@ -233,9 +218,6 @@ void applyDryRunNotCompatible() { verify(schemaService, never()).register(namespace, schema); } - /** - * Test to get all schemas of namespace - */ @Test void list() { Namespace namespace = buildNamespace(); @@ -251,9 +233,6 @@ void list() { .verifyComplete(); } - /** - * Test to get a subject by namespace and subject - */ @Test void get() { Namespace namespace = buildNamespace(); @@ -268,9 +247,6 @@ void get() { .verifyComplete(); } - /** - * Test to get a subject by namespace and subject name when the required subject does not belong to the namespace - */ @Test void getNamespaceNotOwnerOfSubject() { Namespace namespace = buildNamespace(); @@ -280,14 +256,11 @@ void getNamespaceNotOwnerOfSubject() { when(schemaService.isNamespaceOwnerOfSubject(namespace, schema.getMetadata().getName())).thenReturn(false); StepVerifier.create(schemaController.get("myNamespace", "prefix.subject-value")) - .verifyComplete(); + .verifyComplete(); verify(schemaService, never()).getLatestSubject(namespace, schema.getMetadata().getName()); } - /** - * Test the 
compatibility update - */ @Test void compatibilityUpdateSubjectNotExist() { Namespace namespace = buildNamespace(); @@ -297,16 +270,14 @@ void compatibilityUpdateSubjectNotExist() { when(schemaService.isNamespaceOwnerOfSubject(namespace, schema.getMetadata().getName())).thenReturn(true); when(schemaService.getLatestSubject(namespace, "prefix.subject-value")).thenReturn(Mono.empty()); - StepVerifier.create(schemaController.config("myNamespace", "prefix.subject-value", Schema.Compatibility.FORWARD)) - .consumeNextWith(response -> assertEquals(HttpStatus.NOT_FOUND, response.getStatus())) - .verifyComplete(); + StepVerifier.create( + schemaController.config("myNamespace", "prefix.subject-value", Schema.Compatibility.FORWARD)) + .consumeNextWith(response -> assertEquals(HttpStatus.NOT_FOUND, response.getStatus())) + .verifyComplete(); verify(schemaService, never()).updateSubjectCompatibility(any(), any(), any()); } - /** - * Test the compatibility update - */ @Test void compatibilityUpdateChanged() { Namespace namespace = buildNamespace(); @@ -315,11 +286,13 @@ void compatibilityUpdateChanged() { when(namespaceService.findByName("myNamespace")).thenReturn(Optional.of(namespace)); when(schemaService.isNamespaceOwnerOfSubject(namespace, schema.getMetadata().getName())).thenReturn(true); when(schemaService.getLatestSubject(namespace, "prefix.subject-value")).thenReturn(Mono.just(schema)); - when(schemaService.updateSubjectCompatibility(namespace, schema, Schema.Compatibility.FORWARD)).thenReturn(Mono.just(SchemaCompatibilityResponse.builder() + when(schemaService.updateSubjectCompatibility(namespace, schema, Schema.Compatibility.FORWARD)).thenReturn( + Mono.just(SchemaCompatibilityResponse.builder() .compatibilityLevel(Schema.Compatibility.FORWARD) .build())); - StepVerifier.create(schemaController.config("myNamespace", "prefix.subject-value", Schema.Compatibility.FORWARD)) + StepVerifier.create( + schemaController.config("myNamespace", "prefix.subject-value", Schema.Compatibility.FORWARD)) .consumeNextWith(response -> { assertEquals(HttpStatus.OK, response.getStatus()); assertTrue(response.getBody().isPresent()); @@ -329,9 +302,6 @@ void compatibilityUpdateChanged() { .verifyComplete(); } - /** - * Test the compatibility update when the compat did not change - */ @Test void compatibilityUpdateUnchanged() { Namespace namespace = buildNamespace(); @@ -342,7 +312,8 @@ void compatibilityUpdateUnchanged() { when(schemaService.isNamespaceOwnerOfSubject(namespace, schema.getMetadata().getName())).thenReturn(true); when(schemaService.getLatestSubject(namespace, "prefix.subject-value")).thenReturn(Mono.just(schema)); - StepVerifier.create(schemaController.config("myNamespace", "prefix.subject-value", Schema.Compatibility.FORWARD)) + StepVerifier.create( + schemaController.config("myNamespace", "prefix.subject-value", Schema.Compatibility.FORWARD)) .consumeNextWith(response -> { assertEquals(HttpStatus.OK, response.getStatus()); assertTrue(response.getBody().isPresent()); @@ -354,9 +325,6 @@ void compatibilityUpdateUnchanged() { verify(schemaService, never()).updateSubjectCompatibility(namespace, schema, Schema.Compatibility.FORWARD); } - /** - * Test the compatibility update when the namespace is not owner of the subject - */ @Test void compatibilityUpdateNamespaceNotOwnerOfSubject() { Namespace namespace = buildNamespace(); @@ -365,20 +333,19 @@ void compatibilityUpdateNamespaceNotOwnerOfSubject() { when(namespaceService.findByName("myNamespace")).thenReturn(Optional.of(namespace)); 
when(schemaService.isNamespaceOwnerOfSubject(namespace, schema.getMetadata().getName())).thenReturn(false); - StepVerifier.create(schemaController.config("myNamespace", "prefix.subject-value", Schema.Compatibility.BACKWARD)) + StepVerifier.create( + schemaController.config("myNamespace", "prefix.subject-value", Schema.Compatibility.BACKWARD)) .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Invalid prefix prefix.subject-value : namespace not owner of this subject", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Invalid prefix prefix.subject-value : namespace not owner of this subject", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); verify(schemaService, never()).updateSubjectCompatibility(any(), any(), any()); } - /** - * Test the subject deletion when the namespace is not owner of the subject - */ @Test void deleteSubjectNamespaceNotOwnerOfSubject() { Namespace namespace = buildNamespace(); @@ -390,16 +357,14 @@ void deleteSubjectNamespaceNotOwnerOfSubject() { .consumeErrorWith(error -> { assertEquals(ResourceValidationException.class, error.getClass()); assertEquals(1, ((ResourceValidationException) error).getValidationErrors().size()); - assertEquals("Namespace not owner of this schema prefix.subject-value.", ((ResourceValidationException) error).getValidationErrors().get(0)); + assertEquals("Namespace not owner of this schema prefix.subject-value.", + ((ResourceValidationException) error).getValidationErrors().get(0)); }) .verify(); verify(schemaService, never()).updateSubjectCompatibility(any(), any(), any()); } - /** - * Test the subject deletion - */ @Test void deleteSubject() { Namespace namespace = buildNamespace(); @@ -420,9 +385,6 @@ void deleteSubject() { verify(schemaService, times(1)).deleteSubject(namespace, "prefix.subject-value"); } - /** - * Should not delete subject when empty - */ @Test void shouldNotDeleteSubjectWhenEmpty() { Namespace namespace = buildNamespace(); @@ -432,13 +394,10 @@ void shouldNotDeleteSubjectWhenEmpty() { when(schemaService.getLatestSubject(namespace, "prefix.subject-value")).thenReturn(Mono.empty()); StepVerifier.create(schemaController.deleteSubject("myNamespace", "prefix.subject-value", false)) - .consumeNextWith(response -> assertEquals(HttpStatus.NOT_FOUND, response.getStatus())) - .verifyComplete(); + .consumeNextWith(response -> assertEquals(HttpStatus.NOT_FOUND, response.getStatus())) + .verifyComplete(); } - /** - * Test the subject deletion in dry mode - */ @Test void deleteSubjectDryRun() { Namespace namespace = buildNamespace(); @@ -455,43 +414,43 @@ void deleteSubjectDryRun() { verify(schemaService, never()).deleteSubject(namespace, "prefix.subject-value"); } - /** - * Build a namespace resource - * @return The namespace - */ private Namespace buildNamespace() { return Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); } - /** - * Build a schema resource - * @return The schema - */ private Schema buildSchema() { return Schema.builder() - .metadata(ObjectMeta.builder() - .name("prefix.subject-value") - .build()) - 
.spec(Schema.SchemaSpec.builder() - .id(1) - .version(1) - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.subject-value") + .build()) + .spec(Schema.SchemaSpec.builder() + .id(1) + .version(1) + .schema( + "{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\"," + + "\"name\":\"PersonAvro\"" + + ",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null," + + "\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"Last name of the person\"}," + + "{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\"," + + "\"logicalType\":\"timestamp-millis\"}]," + + "\"default\":null,\"doc\":\"Date of birth of the person\"}]}") + .build()) + .build(); } private SchemaList buildSchemaList() { return SchemaList.builder() - .metadata(ObjectMeta.builder() - .name("prefix.subject-value") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.subject-value") + .build()) + .build(); } } diff --git a/src/test/java/com/michelin/ns4kafka/controllers/StreamControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/StreamControllerTest.java index 1809e3c6..a6781762 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/StreamControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/StreamControllerTest.java @@ -1,5 +1,14 @@ package com.michelin.ns4kafka.controllers; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AuditLog; import com.michelin.ns4kafka.models.KafkaStream; import com.michelin.ns4kafka.models.Namespace; @@ -11,20 +20,14 @@ import io.micronaut.context.event.ApplicationEventPublisher; import io.micronaut.http.HttpStatus; import io.micronaut.security.utils.SecurityService; +import java.util.List; +import java.util.Optional; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; - @ExtendWith(MockitoExtension.class) class StreamControllerTest { @Mock @@ -42,54 +45,48 @@ class StreamControllerTest { @InjectMocks StreamController streamController; - /** - * Validate empty Kafka Streams listing - */ @Test void listEmptyStreams() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + 
.metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); List actual = streamController.list("test"); assertEquals(0, actual.size()); } - /** - * Validate Kafka Streams listing - */ @Test void listStreams() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .build()) + .name("test_stream1") + .build()) .build(); KafkaStream stream2 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream2") - .build()) + .name("test_stream2") + .build()) .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.findAllForNamespace(ns)) - .thenReturn(List.of(stream1, stream2)); + .thenReturn(List.of(stream1, stream2)); List actual = streamController.list("test"); assertEquals(2, actual.size()); @@ -97,51 +94,45 @@ void listStreams() { assertTrue(actual.contains(stream2)); } - /** - * Validate get Kafka Streams with empty response - */ @Test void getEmpty() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.findByName(ns, "test_stream1")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); Optional actual = streamController.get("test", "test_stream1"); assertTrue(actual.isEmpty()); } - /** - * Validate get Kafka Streams - */ @Test void getStreamFound() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .build()) + .name("test_stream1") + .build()) .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.findByName(ns, "test_stream1")) - .thenReturn(Optional.of(stream1)); + .thenReturn(Optional.of(stream1)); Optional actual = streamController.get("test", "test_stream1"); assertTrue(actual.isPresent()); @@ -151,32 +142,32 @@ void getStreamFound() { @Test void createStreamSuccess() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .build()) + .name("test_stream1") + .build()) .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.isNamespaceOwnerOfKafkaStream(ns, "test_stream1")) - .thenReturn(true); + .thenReturn(true); when(streamService.findByName(ns, "test_stream1")) - .thenReturn(Optional.empty()); + 
.thenReturn(Optional.empty()); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); when(streamService.create(stream1)) - .thenReturn(stream1); + .thenReturn(stream1); var response = streamController.apply("test", stream1, false); KafkaStream actual = response.body(); @@ -187,30 +178,30 @@ void createStreamSuccess() { @Test void createStreamSuccessDryRun() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .build()) + .name("test_stream1") + .build()) .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.isNamespaceOwnerOfKafkaStream(ns, "test_stream1")) - .thenReturn(true); + .thenReturn(true); when(streamService.findByName(ns, "test_stream1")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); var response = streamController.apply("test", stream1, true); KafkaStream actual = response.body(); - Mockito.verify(streamService, never()).create(any()); + verify(streamService, never()).create(any()); assertEquals("created", response.header("X-Ns4kafka-Result")); assertEquals("test_stream1", actual.getMetadata().getName()); } @@ -218,30 +209,30 @@ void createStreamSuccessDryRun() { @Test void updateStreamUnchanged() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .build()) + .name("test_stream1") + .build()) .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.isNamespaceOwnerOfKafkaStream(ns, "test_stream1")) - .thenReturn(true); + .thenReturn(true); when(streamService.findByName(ns, "test_stream1")) - .thenReturn(Optional.of(stream1)); + .thenReturn(Optional.of(stream1)); var response = streamController.apply("test", stream1, false); KafkaStream actual = response.body(); - Mockito.verify(streamService, never()).create(any()); + verify(streamService, never()).create(any()); assertEquals("unchanged", response.header("X-Ns4kafka-Result")); assertEquals("test_stream1", actual.getMetadata().getName()); } @@ -249,140 +240,128 @@ void updateStreamUnchanged() { @Test void createStreamValidationError() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .build()) + .name("test_stream1") + .build()) .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.isNamespaceOwnerOfKafkaStream(ns, "test_stream1")) - .thenReturn(false); + .thenReturn(false); assertThrows(ResourceValidationException.class, () -> streamController.apply("test", stream1, false)); - Mockito.verify(streamService, 
never()).create(any()); + verify(streamService, never()).create(any()); } - /** - * Validate Kafka Streams deletion - */ @Test void deleteStreamSuccess() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .build()) + .name("test_stream1") + .build()) .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.isNamespaceOwnerOfKafkaStream(ns, "test_stream1")) - .thenReturn(true); + .thenReturn(true); when(streamService.findByName(ns, "test_stream1")) - .thenReturn(Optional.of(stream1)); + .thenReturn(Optional.of(stream1)); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(applicationEventPublisher).publishEvent(any()); - doNothing().when(streamService).delete(ns,stream1); + doNothing().when(streamService).delete(ns, stream1); var response = streamController.delete("test", "test_stream1", false); assertEquals(HttpStatus.NO_CONTENT, response.getStatus()); } - /** - * Validate Kafka Streams deletion in dry mode - */ @Test void deleteStreamSuccessDryRun() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .build()) + .name("test_stream1") + .build()) .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.isNamespaceOwnerOfKafkaStream(ns, "test_stream1")) - .thenReturn(true); + .thenReturn(true); when(streamService.findByName(ns, "test_stream1")) - .thenReturn(Optional.of(stream1)); + .thenReturn(Optional.of(stream1)); var response = streamController.delete("test", "test_stream1", true); - Mockito.verify(streamService, never()).delete(any(), any()); + verify(streamService, never()).delete(any(), any()); assertEquals(HttpStatus.NO_CONTENT, response.getStatus()); } - /** - * Validate Kafka Streams deletion fails when not found - */ @Test void deleteStreamNotFound() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.isNamespaceOwnerOfKafkaStream(ns, "test_stream1")) - .thenReturn(true); + .thenReturn(true); when(streamService.findByName(ns, "test_stream1")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); var response = streamController.delete("test", "test_stream1", false); - Mockito.verify(streamService, never()).delete(any(), any()); + verify(streamService, never()).delete(any(), any()); assertEquals(HttpStatus.NOT_FOUND, response.getStatus()); } - /** - * Validate Kafka Streams deletion fails when not owner - */ @Test void deleteStreamNotOwner() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + 
.metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(streamService.isNamespaceOwnerOfKafkaStream(ns, "test_stream1")) - .thenReturn(false); + .thenReturn(false); assertThrows(ResourceValidationException.class, () -> streamController.delete("test", "test_stream1", false)); - Mockito.verify(streamService, never()).delete(any(), any()); + verify(streamService, never()).delete(any(), any()); } } diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java index 369ead59..23603594 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicControllerTest.java @@ -1,8 +1,24 @@ package com.michelin.ns4kafka.controllers; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertLinesMatch; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.controllers.topic.TopicController; -import com.michelin.ns4kafka.models.*; +import com.michelin.ns4kafka.models.AuditLog; +import com.michelin.ns4kafka.models.DeleteRecordsResponse; +import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Namespace.NamespaceSpec; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.security.ResourceBasedSecurityRule; import com.michelin.ns4kafka.services.NamespaceService; import com.michelin.ns4kafka.services.ResourceQuotaService; @@ -13,6 +29,11 @@ import io.micronaut.http.HttpResponse; import io.micronaut.http.HttpStatus; import io.micronaut.security.utils.SecurityService; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; import org.apache.kafka.common.TopicPartition; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -22,16 +43,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; - @ExtendWith(MockitoExtension.class) class TopicControllerTest { @Mock @@ -52,46 +63,40 @@ class TopicControllerTest { @InjectMocks TopicController topicController; - /** - * Validate empty topics listing - */ @Test void listEmptyTopics() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); 
when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); List actual = topicController.list("test"); assertEquals(0, actual.size()); } - /** - * Validate topics listing - */ @Test void listMultipleTopics() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of( - Topic.builder().metadata(ObjectMeta.builder().name("topic1").build()).build(), - Topic.builder().metadata(ObjectMeta.builder().name("topic2").build()).build() - )); + .thenReturn(List.of( + Topic.builder().metadata(ObjectMeta.builder().name("topic1").build()).build(), + Topic.builder().metadata(ObjectMeta.builder().name("topic2").build()).build() + )); List actual = topicController.list("test"); @@ -100,46 +105,40 @@ void listMultipleTopics() { assertEquals("topic2", actual.get(1).getMetadata().getName()); } - /** - * Validate get topic empty response - */ @Test void getEmptyTopic() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.findByName(ns, "topic.notfound")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); Optional actual = topicController.getTopic("test", "topic.notfound"); assertTrue(actual.isEmpty()); } - /** - * Validate get topic - */ @Test void getTopic() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.findByName(ns, "topic.found")) - .thenReturn(Optional.of( - Topic.builder().metadata(ObjectMeta.builder().name("topic.found").build()).build() - )); + .thenReturn(Optional.of( + Topic.builder().metadata(ObjectMeta.builder().name("topic.found").build()).build() + )); Optional actual = topicController.getTopic("test", "topic.found"); @@ -147,29 +146,23 @@ void getTopic() { assertEquals("topic.found", actual.get().getMetadata().getName()); } - /** - * Validate topic deletion - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void deleteTopic() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); Optional toDelete = Optional.of( - Topic.builder().metadata(ObjectMeta.builder().name("topic.delete").build()).build()); + Topic.builder().metadata(ObjectMeta.builder().name("topic.delete").build()).build()); when(topicService.findByName(ns, "topic.delete")) - .thenReturn(toDelete); - 
when(topicService.isNamespaceOwnerOfTopic("test","topic.delete")) - .thenReturn(true); + .thenReturn(toDelete); + when(topicService.isNamespaceOwnerOfTopic("test", "topic.delete")) + .thenReturn(true); when(securityService.username()).thenReturn(Optional.of("test-user")); when(securityService.hasRole(ResourceBasedSecurityRule.IS_ADMIN)).thenReturn(false); doNothing().when(topicService).delete(toDelete.get()); @@ -180,89 +173,74 @@ void deleteTopic() throws InterruptedException, ExecutionException, TimeoutExcep assertEquals(HttpStatus.NO_CONTENT, actual.getStatus()); } - /** - * Validate topic deletion in dry mode - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void deleteTopicDryRun() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); Optional toDelete = Optional.of( - Topic.builder().metadata(ObjectMeta.builder().name("topic.delete").build()).build()); + Topic.builder().metadata(ObjectMeta.builder().name("topic.delete").build()).build()); when(topicService.findByName(ns, "topic.delete")) - .thenReturn(toDelete); - when(topicService.isNamespaceOwnerOfTopic("test","topic.delete")) - .thenReturn(true); + .thenReturn(toDelete); + when(topicService.isNamespaceOwnerOfTopic("test", "topic.delete")) + .thenReturn(true); topicController.deleteTopic("test", "topic.delete", true); verify(topicService, never()).delete(any()); } - /** - * Validate topic deletion when unauthorized - */ @Test - void deleteTopicUnauthorized() { + void deleteTopicUnauthorized() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic("test", "topic.delete")) - .thenReturn(false); + .thenReturn(false); assertThrows(ResourceValidationException.class, - () -> topicController.deleteTopic("test", "topic.delete", false)); + () -> topicController.deleteTopic("test", "topic.delete", false)); } - /** - * Validate topic creation - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void createNewTopic() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - 
.build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic(any(), any())).thenReturn(true); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.empty()); when(resourceQuotaService.validateTopicQuota(ns, Optional.empty(), topic)).thenReturn(List.of()); @@ -281,31 +259,31 @@ void createNewTopic() throws InterruptedException, ExecutionException, TimeoutEx @Test void shouldCreateNewTopicWithNoConstraint() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.builder() - .build()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.builder() + .build()) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic(any(), any())).thenReturn(true); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.empty()); when(resourceQuotaService.validateTopicQuota(ns, Optional.empty(), topic)).thenReturn(List.of()); @@ -321,52 +299,46 @@ void shouldCreateNewTopicWithNoConstraint() throws InterruptedException, Executi assertEquals("test.topic", actual.getMetadata().getName()); } - /** - * Validate topic update - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void updateTopic() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + 
.configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.of(existing)); when(topicService.create(topic)).thenReturn(topic); when(securityService.username()).thenReturn(Optional.of("test-user")); @@ -379,106 +351,99 @@ void updateTopic() throws InterruptedException, ExecutionException, TimeoutExcep assertEquals("test.topic", actual.getMetadata().getName()); } - /** - * Validate topic update when there are validations errors - */ @Test void updateTopicValidationErrors() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(6) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(6) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.of(existing)); - when(topicService.validateTopicUpdate(ns, existing, topic)).thenReturn(List.of("Invalid value 6 for configuration partitions: Value is immutable (3).")); + when(topicService.validateTopicUpdate(ns, existing, topic)).thenReturn( + List.of("Invalid value 6 for configuration partitions: Value is immutable (3).")); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> topicController.apply("test", topic, false)); + () -> topicController.apply("test", topic, false)); assertEquals(1, 
actual.getValidationErrors().size()); - assertLinesMatch(List.of("Invalid value 6 for configuration partitions: Value is immutable (3)."), actual.getValidationErrors()); + assertLinesMatch(List.of("Invalid value 6 for configuration partitions: Value is immutable (3)."), + actual.getValidationErrors()); } - /** - * Validate topic update when topic doesn't change - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .namespace("test") - .cluster("local") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .namespace("test") + .cluster("local") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.of(existing)); var response = topicController.apply("test", topic, false); @@ -488,39 +453,33 @@ void updateTopicAlreadyExistsUnchanged() throws InterruptedException, ExecutionE assertEquals(existing, actual); } - /** - * Validate topic creation in dry mode - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void createNewTopicDryRun() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - 
.partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic(any(), any())).thenReturn(true); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.empty()); when(resourceQuotaService.validateTopicQuota(ns, Optional.empty(), topic)).thenReturn(List.of()); @@ -530,68 +489,66 @@ void createNewTopicDryRun() throws InterruptedException, ExecutionException, Tim verify(topicService, never()).create(topic); } - /** - * Validate topic creation when topic validation fails - */ @Test void createNewTopicFailValidation() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("test.topic").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(1) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("test.topic").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(1) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic(any(), any())).thenReturn(true); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.empty()); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> topicController.apply("test", topic, false)); + () -> topicController.apply("test", topic, false)); assertEquals(1, actual.getValidationErrors().size()); assertLinesMatch(List.of(".*replication\\.factor.*"), actual.getValidationErrors()); } @Test - void shouldNotFailWhenCreatingNewTopicWithNoValidator() throws ExecutionException, InterruptedException, TimeoutException { + void shouldNotFailWhenCreatingNewTopicWithNoValidator() + throws ExecutionException, InterruptedException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(null) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(null) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("test.topic").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(1) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + 
.metadata(ObjectMeta.builder().name("test.topic").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(1) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic(any(), any())).thenReturn(true); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.empty()); @@ -601,31 +558,32 @@ void shouldNotFailWhenCreatingNewTopicWithNoValidator() throws ExecutionExceptio } @Test - void shouldNotFailWhenCreatingNewTopicWithNoValidationConstraint() throws ExecutionException, InterruptedException, TimeoutException { + void shouldNotFailWhenCreatingNewTopicWithNoValidationConstraint() + throws ExecutionException, InterruptedException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.builder() - .build()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.builder() + .build()) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("test.topic").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(1) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("test.topic").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(1) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic(any(), any())).thenReturn(true); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.empty()); @@ -634,93 +592,84 @@ void shouldNotFailWhenCreatingNewTopicWithNoValidationConstraint() throws Execut verify(topicService, never()).create(topic); } - /** - * Validate topic creation when topic quota validation fails - */ @Test void createNewTopicFailQuotaValidation() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic(any(), 
any())).thenReturn(true); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.empty()); when(resourceQuotaService.validateTopicQuota(ns, Optional.empty(), topic)).thenReturn(List.of("Quota error")); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> topicController.apply("test", topic, false)); + () -> topicController.apply("test", topic, false)); assertEquals(1, actual.getValidationErrors().size()); assertLinesMatch(List.of("Quota error"), actual.getValidationErrors()); } - /** - * Validate topic import - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void importTopic() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic1") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic1") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic2") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic2") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.listUnsynchronizedTopics(ns)) - .thenReturn(List.of(topic1, topic2)); + .thenReturn(List.of(topic1, topic2)); when(topicService.create(topic1)).thenReturn(topic1); when(topicService.create(topic2)).thenReturn(topic2); @@ -728,142 +677,125 @@ void importTopic() throws InterruptedException, ExecutionException, TimeoutExcep List actual = topicController.importResources("test", false); assertTrue(actual.stream() - .anyMatch(t -> - t.getMetadata().getName().equals("test.topic1") - && t.getStatus().getMessage().equals("Imported from cluster") - && t.getStatus().getPhase().equals(Topic.TopicPhase.Success) - )); + .anyMatch(t -> + t.getMetadata().getName().equals("test.topic1") + && t.getStatus().getMessage().equals("Imported from cluster") + && t.getStatus().getPhase().equals(Topic.TopicPhase.Success) + )); assertTrue(actual.stream() - .anyMatch(t -> - t.getMetadata().getName().equals("test.topic2") - && t.getStatus().getMessage().equals("Imported from cluster") - && t.getStatus().getPhase().equals(Topic.TopicPhase.Success) - )); + 
.anyMatch(t -> + t.getMetadata().getName().equals("test.topic2") + && t.getStatus().getMessage().equals("Imported from cluster") + && t.getStatus().getPhase().equals(Topic.TopicPhase.Success) + )); Assertions.assertFalse(actual.stream() - .anyMatch(t -> - t.getMetadata().getName().equals("test.topic3") - )); + .anyMatch(t -> + t.getMetadata().getName().equals("test.topic3") + )); } - /** - * Validate topic import in dry mode - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void importTopicDryRun() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic1") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic1") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic2") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic2") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.listUnsynchronizedTopics(ns)) - .thenReturn(List.of(topic1, topic2)); + .thenReturn(List.of(topic1, topic2)); List actual = topicController.importResources("test", true); assertTrue(actual.stream() - .anyMatch(t -> - t.getMetadata().getName().equals("test.topic1") - && t.getStatus().getMessage().equals("Imported from cluster") - && t.getStatus().getPhase().equals(Topic.TopicPhase.Success) - )); + .anyMatch(t -> + t.getMetadata().getName().equals("test.topic1") + && t.getStatus().getMessage().equals("Imported from cluster") + && t.getStatus().getPhase().equals(Topic.TopicPhase.Success) + )); assertTrue(actual.stream() - .anyMatch(t -> - t.getMetadata().getName().equals("test.topic2") - && t.getStatus().getMessage().equals("Imported from cluster") - && t.getStatus().getPhase().equals(Topic.TopicPhase.Success) - )); + .anyMatch(t -> + t.getMetadata().getName().equals("test.topic2") + && t.getStatus().getMessage().equals("Imported from cluster") + && t.getStatus().getPhase().equals(Topic.TopicPhase.Success) + )); Assertions.assertFalse(actual.stream() - .anyMatch(t -> - t.getMetadata().getName().equals("test.topic3") - )); + .anyMatch(t -> + 
t.getMetadata().getName().equals("test.topic3") + )); } - /** - * Validate delete records - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - */ @Test void deleteRecordsSuccess() throws ExecutionException, InterruptedException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Topic toEmpty = Topic.builder().metadata(ObjectMeta.builder().name("topic.empty").build()).build(); Map partitionsToDelete = Map.of( - new TopicPartition("topic.empty",0), 100L, - new TopicPartition("topic.empty", 1), 101L); + new TopicPartition("topic.empty", 0), 100L, + new TopicPartition("topic.empty", 1), 101L); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(topicService.isNamespaceOwnerOfTopic("test","topic.empty")) - .thenReturn(true); + .thenReturn(Optional.of(ns)); + when(topicService.isNamespaceOwnerOfTopic("test", "topic.empty")) + .thenReturn(true); when(topicService.validateDeleteRecordsTopic(toEmpty)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(topicService.findByName(ns, "topic.empty")) - .thenReturn(Optional.of(toEmpty)); + .thenReturn(Optional.of(toEmpty)); when(topicService.prepareRecordsToDelete(toEmpty)) - .thenReturn(partitionsToDelete); + .thenReturn(partitionsToDelete); when(topicService.deleteRecords(ArgumentMatchers.eq(toEmpty), anyMap())) - .thenReturn(partitionsToDelete); + .thenReturn(partitionsToDelete); List actual = topicController.deleteRecords("test", "topic.empty", false); DeleteRecordsResponse resultPartition0 = actual - .stream() - .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 0) - .findFirst() - .orElse(null); - - DeleteRecordsResponse resultPartition1 = actual - .stream() - .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 1) - .findFirst() - .orElse(null); + .stream() + .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 0) + .findFirst() + .orElse(null); assertEquals(2L, actual.size()); @@ -872,87 +804,79 @@ void deleteRecordsSuccess() throws ExecutionException, InterruptedException { assertEquals(0, resultPartition0.getSpec().getPartition()); assertEquals("topic.empty", resultPartition0.getSpec().getTopic()); + DeleteRecordsResponse resultPartition1 = actual + .stream() + .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 1) + .findFirst() + .orElse(null); + assertNotNull(resultPartition1); assertEquals(101L, resultPartition1.getSpec().getOffset()); assertEquals(1, resultPartition1.getSpec().getPartition()); assertEquals("topic.empty", resultPartition1.getSpec().getTopic()); } - /** - * Validate delete records fails on compacted topic - */ @Test void deleteRecordsCompactedTopic() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Topic toEmpty = Topic.builder().metadata(ObjectMeta.builder().name("topic.empty").build()).build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(topicService.isNamespaceOwnerOfTopic("test","topic.empty")) - .thenReturn(true); + .thenReturn(Optional.of(ns)); + when(topicService.isNamespaceOwnerOfTopic("test", "topic.empty")) + .thenReturn(true); when(topicService.validateDeleteRecordsTopic(toEmpty)) - 
.thenReturn(List.of("Cannot delete records on a compacted topic. Please delete and recreate the topic.")); + .thenReturn(List.of("Cannot delete records on a compacted topic. Please delete and recreate the topic.")); when(topicService.findByName(ns, "topic.empty")) - .thenReturn(Optional.of(toEmpty)); + .thenReturn(Optional.of(toEmpty)); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> topicController.deleteRecords("test", "topic.empty", false)); + () -> topicController.deleteRecords("test", "topic.empty", false)); assertEquals(1, actual.getValidationErrors().size()); assertLinesMatch(List.of("Cannot delete records on a compacted topic. Please delete and recreate the topic."), - actual.getValidationErrors()); + actual.getValidationErrors()); } - /** - * Validate delete records in dry mode - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - */ @Test void deleteRecordsDryRun() throws InterruptedException, ExecutionException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); Topic toEmpty = Topic.builder().metadata(ObjectMeta.builder().name("topic.empty").build()).build(); Map partitionsToDelete = Map.of( - new TopicPartition("topic.empty",0), 100L, - new TopicPartition("topic.empty", 1), 101L); + new TopicPartition("topic.empty", 0), 100L, + new TopicPartition("topic.empty", 1), 101L); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(topicService.isNamespaceOwnerOfTopic("test","topic.empty")) - .thenReturn(true); + .thenReturn(Optional.of(ns)); + when(topicService.isNamespaceOwnerOfTopic("test", "topic.empty")) + .thenReturn(true); when(topicService.validateDeleteRecordsTopic(toEmpty)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(topicService.findByName(ns, "topic.empty")) - .thenReturn(Optional.of(toEmpty)); + .thenReturn(Optional.of(toEmpty)); when(topicService.prepareRecordsToDelete(toEmpty)) - .thenReturn(partitionsToDelete); + .thenReturn(partitionsToDelete); List actual = topicController.deleteRecords("test", "topic.empty", true); DeleteRecordsResponse resultPartition0 = actual - .stream() - .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 0) - .findFirst() - .orElse(null); - - DeleteRecordsResponse resultPartition1 = actual - .stream() - .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 1) - .findFirst() - .orElse(null); + .stream() + .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 0) + .findFirst() + .orElse(null); assertEquals(2L, actual.size()); @@ -961,6 +885,12 @@ void deleteRecordsDryRun() throws InterruptedException, ExecutionException { assertEquals(0, resultPartition0.getSpec().getPartition()); assertEquals("topic.empty", resultPartition0.getSpec().getTopic()); + DeleteRecordsResponse resultPartition1 = actual + .stream() + .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 1) + .findFirst() + .orElse(null); + assertNotNull(resultPartition1); assertEquals(101L, resultPartition1.getSpec().getOffset()); assertEquals(1, resultPartition1.getSpec().getPartition()); @@ -969,98 +899,87 @@ void deleteRecordsDryRun() throws InterruptedException, ExecutionException { verify(topicService, never()).deleteRecords(any(), anyMap()); } - /** - * Validate delete records when not owner of topic - */ @Test void 
deleteRecordsNotOwner() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(topicService.isNamespaceOwnerOfTopic("test","topic.empty")) - .thenReturn(false); + .thenReturn(Optional.of(ns)); + when(topicService.isNamespaceOwnerOfTopic("test", "topic.empty")) + .thenReturn(false); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> topicController.deleteRecords("test", "topic.empty", false)); + () -> topicController.deleteRecords("test", "topic.empty", false)); assertEquals(1, actual.getValidationErrors().size()); assertLinesMatch(List.of("Namespace not owner of this topic \"topic.empty\"."), - actual.getValidationErrors()); + actual.getValidationErrors()); } - /** - * Validate delete records when not owner of topic - */ @Test void deleteRecordsNotExistingTopic() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); - when(topicService.isNamespaceOwnerOfTopic("test","topic.empty")) - .thenReturn(true); + .thenReturn(Optional.of(ns)); + when(topicService.isNamespaceOwnerOfTopic("test", "topic.empty")) + .thenReturn(true); when(topicService.findByName(ns, "topic.empty")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); ResourceValidationException actual = assertThrows(ResourceValidationException.class, - () -> topicController.deleteRecords("test", "topic.empty", false)); + () -> topicController.deleteRecords("test", "topic.empty", false)); assertEquals(1, actual.getValidationErrors().size()); assertLinesMatch(List.of("Topic \"topic.empty\" does not exist."), - actual.getValidationErrors()); + actual.getValidationErrors()); } - /** - * Validate topic creation with name collision - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void createCollidingTopic() throws InterruptedException, ExecutionException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .topicValidator(TopicValidator.makeDefault()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .topicValidator(TopicValidator.makeDefault()) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); when(namespaceService.findByName("test")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(topicService.isNamespaceOwnerOfTopic(any(), 
any())).thenReturn(true); when(topicService.findByName(ns, "test.topic")).thenReturn(Optional.empty()); when(topicService.findCollidingTopics(ns, topic)).thenReturn(List.of("test_topic")); - ResourceValidationException actual = assertThrows(ResourceValidationException.class, () -> topicController.apply("test", topic, false)); + ResourceValidationException actual = + assertThrows(ResourceValidationException.class, () -> topicController.apply("test", topic, false)); assertEquals(1, actual.getValidationErrors().size()); assertLinesMatch( - List.of("Topic test.topic collides with existing topics: test_topic."), - actual.getValidationErrors()); + List.of("Topic test.topic collides with existing topics: test_topic."), + actual.getValidationErrors()); } } diff --git a/src/test/java/com/michelin/ns4kafka/controllers/TopicNonNamespacedControllerTest.java b/src/test/java/com/michelin/ns4kafka/controllers/TopicNonNamespacedControllerTest.java index e74cf388..41f28f0e 100644 --- a/src/test/java/com/michelin/ns4kafka/controllers/TopicNonNamespacedControllerTest.java +++ b/src/test/java/com/michelin/ns4kafka/controllers/TopicNonNamespacedControllerTest.java @@ -1,20 +1,19 @@ package com.michelin.ns4kafka.controllers; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.controllers.topic.TopicNonNamespacedController; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.services.TopicService; +import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class TopicNonNamespacedControllerTest { @Mock @@ -23,16 +22,13 @@ class TopicNonNamespacedControllerTest { @InjectMocks TopicNonNamespacedController topicController; - /** - * Validate topics listing - */ @Test void listAll() { when(topicService.findAll()) - .thenReturn(List.of( - Topic.builder().metadata(ObjectMeta.builder().name("topic1").build()).build(), - Topic.builder().metadata(ObjectMeta.builder().name("topic2").build()).build() - )); + .thenReturn(List.of( + Topic.builder().metadata(ObjectMeta.builder().name("topic1").build()).build(), + Topic.builder().metadata(ObjectMeta.builder().name("topic2").build()).build() + )); List actual = topicController.listAll(); diff --git a/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationConnectTest.java b/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationConnectTest.java index 15c21b74..6290fc1a 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationConnectTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationConnectTest.java @@ -2,18 +2,21 @@ import com.michelin.ns4kafka.testcontainers.KafkaConnectContainer; import io.micronaut.core.annotation.NonNull; -import org.junit.jupiter.api.TestInstance; -import org.testcontainers.utility.DockerImageName; - import java.util.HashMap; import java.util.Map; +import org.junit.jupiter.api.TestInstance; +import org.testcontainers.utility.DockerImageName; +/** + * Kafka Connect integration test. 
+ */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class AbstractIntegrationConnectTest extends AbstractIntegrationTest { public KafkaConnectContainer connect; /** - * Starts the Kafka Connect container + * Starts the Kafka Connect container. + * * @return Properties enriched with the Kafka Connect URL */ @NonNull @@ -21,11 +24,14 @@ public abstract class AbstractIntegrationConnectTest extends AbstractIntegration public Map getProperties() { Map brokerProps = super.getProperties(); if (connect == null || !connect.isRunning()) { - connect = new KafkaConnectContainer(DockerImageName.parse("confluentinc/cp-kafka-connect:" + CONFLUENT_VERSION), + connect = + new KafkaConnectContainer(DockerImageName.parse("confluentinc/cp-kafka-connect:" + CONFLUENT_VERSION), "kafka:9092") .withEnv("CONNECT_SASL_MECHANISM", "PLAIN") .withEnv("CONNECT_SECURITY_PROTOCOL", "SASL_PLAINTEXT") - .withEnv("CONNECT_SASL_JAAS_CONFIG", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin\";") + .withEnv("CONNECT_SASL_JAAS_CONFIG", + "org.apache.kafka.common.security.plain.PlainLoginModule " + + "required username=\"admin\" password=\"admin\";") .withNetwork(network); connect.start(); } diff --git a/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationSchemaRegistryTest.java b/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationSchemaRegistryTest.java index 7cbeef99..d8496fff 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationSchemaRegistryTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationSchemaRegistryTest.java @@ -2,19 +2,22 @@ import com.michelin.ns4kafka.testcontainers.SchemaRegistryContainer; import io.micronaut.core.annotation.NonNull; -import org.junit.jupiter.api.TestInstance; -import org.testcontainers.utility.DockerImageName; - import java.util.HashMap; import java.util.Map; +import org.junit.jupiter.api.TestInstance; +import org.testcontainers.utility.DockerImageName; +/** + * Schema Registry integration test. + */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class AbstractIntegrationSchemaRegistryTest extends AbstractIntegrationTest { public static final String CONFLUENT_REGISTRY_VERSION = "7.4.1"; public SchemaRegistryContainer schemaRegistryContainer; /** - * Starts the Schema registry container + * Starts the Schema registry container. 
+ * * @return Properties enriched with the Schema Registry URL */ @NonNull @@ -22,12 +25,15 @@ public abstract class AbstractIntegrationSchemaRegistryTest extends AbstractInte public Map getProperties() { Map brokerProps = super.getProperties(); if (schemaRegistryContainer == null || !schemaRegistryContainer.isRunning()) { - schemaRegistryContainer = new SchemaRegistryContainer(DockerImageName.parse("confluentinc/cp-schema-registry:" + CONFLUENT_REGISTRY_VERSION), - "kafka:9092") - .withEnv("SCHEMA_REGISTRY_KAFKASTORE_SASL_MECHANISM", "PLAIN") - .withEnv("SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL", "SASL_PLAINTEXT") - .withEnv("SCHEMA_REGISTRY_KAFKASTORE_SASL_JAAS_CONFIG", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin\";") - .withNetwork(network); + schemaRegistryContainer = new SchemaRegistryContainer( + DockerImageName.parse("confluentinc/cp-schema-registry:" + CONFLUENT_REGISTRY_VERSION), + "kafka:9092") + .withEnv("SCHEMA_REGISTRY_KAFKASTORE_SASL_MECHANISM", "PLAIN") + .withEnv("SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL", "SASL_PLAINTEXT") + .withEnv("SCHEMA_REGISTRY_KAFKASTORE_SASL_JAAS_CONFIG", + "org.apache.kafka.common.security.plain.PlainLoginModule " + + "required username=\"admin\" password=\"admin\";") + .withNetwork(network); schemaRegistryContainer.start(); } diff --git a/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationTest.java b/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationTest.java index b983ec16..23c423b1 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/AbstractIntegrationTest.java @@ -2,14 +2,16 @@ import io.micronaut.core.annotation.NonNull; import io.micronaut.test.support.TestPropertyProvider; +import java.util.Map; import org.apache.kafka.clients.admin.Admin; import org.junit.jupiter.api.TestInstance; import org.testcontainers.containers.KafkaContainer; import org.testcontainers.containers.Network; import org.testcontainers.utility.DockerImageName; -import java.util.Map; - +/** + * Abstract integration test. 
+ */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class AbstractIntegrationTest implements TestPropertyProvider { public static final String CONFLUENT_VERSION = "7.4.1"; @@ -24,56 +26,67 @@ public Map getProperties() { if (kafka == null || !kafka.isRunning()) { network = Network.newNetwork(); kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:" + CONFLUENT_VERSION)) - .withEnv( - "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP", - "PLAINTEXT:SASL_PLAINTEXT,BROKER:SASL_PLAINTEXT" - ) - .withEnv("KAFKA_INTER_BROKER_LISTENER_NAME", "BROKER") - .withEnv("KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL", "PLAIN") - .withEnv("KAFKA_LISTENER_NAME_PLAINTEXT_SASL_ENABLED_MECHANISMS", "PLAIN") - .withEnv("KAFKA_LISTENER_NAME_BROKER_SASL_ENABLED_MECHANISMS", "PLAIN") + .withEnv( + "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP", + "PLAINTEXT:SASL_PLAINTEXT,BROKER:SASL_PLAINTEXT" + ) + .withEnv("KAFKA_INTER_BROKER_LISTENER_NAME", "BROKER") + .withEnv("KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL", "PLAIN") + .withEnv("KAFKA_LISTENER_NAME_PLAINTEXT_SASL_ENABLED_MECHANISMS", "PLAIN") + .withEnv("KAFKA_LISTENER_NAME_BROKER_SASL_ENABLED_MECHANISMS", "PLAIN") - .withEnv("KAFKA_LISTENER_NAME_BROKER_PLAIN_SASL_JAAS_CONFIG", - "org.apache.kafka.common.security.plain.PlainLoginModule required " + - "username=\"admin\" password=\"admin\" " + - "user_admin=\"admin\" " + - "user_client=\"client\";") - .withEnv("KAFKA_SASL_JAAS_CONFIG", - "org.apache.kafka.common.security.plain.PlainLoginModule required " + - "username=\"admin\" password=\"admin\";") - .withEnv( - "KAFKA_LISTENER_NAME_PLAINTEXT_PLAIN_SASL_JAAS_CONFIG", - "org.apache.kafka.common.security.plain.PlainLoginModule required " + - "username=\"admin\" password=\"admin\" " + - "user_admin=\"admin\" " + - "user_client=\"client\";") - .withEnv("KAFKA_AUTHORIZER_CLASS_NAME", "kafka.security.authorizer.AclAuthorizer") - .withEnv("KAFKA_SUPER_USERS", "User:admin") - .withNetworkAliases("kafka") - .withNetwork(network); + .withEnv("KAFKA_LISTENER_NAME_BROKER_PLAIN_SASL_JAAS_CONFIG", + "org.apache.kafka.common.security.plain.PlainLoginModule required " + + "username=\"admin\" password=\"admin\" " + + "user_admin=\"admin\" " + + "user_client=\"client\";") + .withEnv("KAFKA_SASL_JAAS_CONFIG", + "org.apache.kafka.common.security.plain.PlainLoginModule required " + + "username=\"admin\" password=\"admin\";") + .withEnv( + "KAFKA_LISTENER_NAME_PLAINTEXT_PLAIN_SASL_JAAS_CONFIG", + "org.apache.kafka.common.security.plain.PlainLoginModule required " + + "username=\"admin\" password=\"admin\" " + + "user_admin=\"admin\" " + + "user_client=\"client\";") + .withEnv("KAFKA_AUTHORIZER_CLASS_NAME", "kafka.security.authorizer.AclAuthorizer") + .withEnv("KAFKA_SUPER_USERS", "User:admin") + .withNetworkAliases("kafka") + .withNetwork(network); kafka.start(); } return Map.of( - "kafka.bootstrap.servers", kafka.getHost()+":"+kafka.getMappedPort(9093), - "kafka.sasl.mechanism", "PLAIN", - "kafka.security.protocol", "SASL_PLAINTEXT", - "kafka.sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin\";", + "kafka.bootstrap.servers", kafka.getHost() + ":" + kafka.getMappedPort(9093), + "kafka.sasl.mechanism", "PLAIN", + "kafka.security.protocol", "SASL_PLAINTEXT", + "kafka.sasl.jaas.config", + "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin\";", - "ns4kafka.managed-clusters.test-cluster.config.bootstrap.servers", 
kafka.getHost()+":"+kafka.getMappedPort(9093), - "ns4kafka.managed-clusters.test-cluster.config.sasl.mechanism", "PLAIN", - "ns4kafka.managed-clusters.test-cluster.config.security.protocol", "SASL_PLAINTEXT", - "ns4kafka.managed-clusters.test-cluster.config.sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin\";" + "ns4kafka.managed-clusters.test-cluster.config.bootstrap.servers", + kafka.getHost() + ":" + kafka.getMappedPort(9093), + "ns4kafka.managed-clusters.test-cluster.config.sasl.mechanism", "PLAIN", + "ns4kafka.managed-clusters.test-cluster.config.security.protocol", "SASL_PLAINTEXT", + "ns4kafka.managed-clusters.test-cluster.config.sasl.jaas.config", + "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin\";" ); } + /** + * Getter for admin client service. + * + * @return The admin client + */ public Admin getAdminClient() { - if (adminClient == null) + if (adminClient == null) { adminClient = Admin.create(Map.of( - "bootstrap.servers", kafka.getHost()+":"+kafka.getMappedPort(9093), - "sasl.mechanism", "PLAIN", - "security.protocol", "SASL_PLAINTEXT", - "sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin\";")); + "bootstrap.servers", kafka.getHost() + ":" + kafka.getMappedPort(9093), + "sasl.mechanism", "PLAIN", + "security.protocol", "SASL_PLAINTEXT", + "sasl.jaas.config", + "org.apache.kafka.common.security.plain.PlainLoginModule " + + "required username=\"admin\" password=\"admin\";")); + } return adminClient; } } diff --git a/src/test/java/com/michelin/ns4kafka/integration/AccessControlListTest.java b/src/test/java/com/michelin/ns4kafka/integration/AccessControlListTest.java index 92a1517a..800c6831 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/AccessControlListTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/AccessControlListTest.java @@ -1,14 +1,24 @@ package com.michelin.ns4kafka.integration; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + import com.michelin.ns4kafka.integration.TopicTest.BearerAccessRefreshToken; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.AccessControlEntry.AccessControlEntrySpec; import com.michelin.ns4kafka.models.AccessControlEntry.Permission; import com.michelin.ns4kafka.models.AccessControlEntry.ResourcePatternType; import com.michelin.ns4kafka.models.AccessControlEntry.ResourceType; -import com.michelin.ns4kafka.models.*; +import com.michelin.ns4kafka.models.KafkaStream; +import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Namespace.NamespaceSpec; -import com.michelin.ns4kafka.models.RoleBinding.*; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.models.RoleBinding; +import com.michelin.ns4kafka.models.RoleBinding.Role; +import com.michelin.ns4kafka.models.RoleBinding.RoleBindingSpec; +import com.michelin.ns4kafka.models.RoleBinding.Subject; +import com.michelin.ns4kafka.models.RoleBinding.SubjectType; +import com.michelin.ns4kafka.models.RoleBinding.Verb; import com.michelin.ns4kafka.services.executors.AccessControlEntryAsyncExecutor; import com.michelin.ns4kafka.validation.TopicValidator; import io.micronaut.context.annotation.Property; @@ -20,22 +30,22 @@ import io.micronaut.security.authentication.UsernamePasswordCredentials; import 
io.micronaut.test.extensions.junit5.annotation.MicronautTest; import jakarta.inject.Inject; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.common.acl.*; +import org.apache.kafka.common.acl.AccessControlEntryFilter; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.acl.AclOperation; +import org.apache.kafka.common.acl.AclPermissionType; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourcePatternFilter; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - @MicronautTest @Property(name = "micronaut.security.gitlab.enabled", value = "false") class AccessControlListTest extends AbstractIntegrationTest { @@ -48,71 +58,67 @@ class AccessControlListTest extends AbstractIntegrationTest { private String token; - /** - * Init all integration tests - */ @BeforeAll void init() { Namespace ns1 = Namespace.builder() .metadata(ObjectMeta.builder() - .name("ns1") - .cluster("test-cluster") - .build()) + .name("ns1") + .cluster("test-cluster") + .build()) .spec(NamespaceSpec.builder() - .kafkaUser("user1") - .connectClusters(List.of("test-connect")) - .topicValidator(TopicValidator.makeDefaultOneBroker()) - .build()) + .kafkaUser("user1") + .connectClusters(List.of("test-connect")) + .topicValidator(TopicValidator.makeDefaultOneBroker()) + .build()) .build(); RoleBinding rb1 = RoleBinding.builder() .metadata(ObjectMeta.builder() - .name("ns1-rb") - .namespace("ns1") - .build()) + .name("ns1-rb") + .namespace("ns1") + .build()) .spec(RoleBindingSpec.builder() - .role(Role.builder() - .resourceTypes(List.of("topics", "acls")) - .verbs(List.of(Verb.POST, Verb.GET)) - .build()) - .subject(Subject.builder() - .subjectName("group1") - .subjectType(SubjectType.GROUP) - .build()) - .build()) + .role(Role.builder() + .resourceTypes(List.of("topics", "acls")) + .verbs(List.of(Verb.POST, Verb.GET)) + .build()) + .subject(Subject.builder() + .subjectName("group1") + .subjectType(SubjectType.GROUP) + .build()) + .build()) .build(); - UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin","admin"); - HttpResponse response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class); + UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); + HttpResponse response = + client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class); token = response.getBody().get().getAccessToken(); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1)); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, 
"/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1)); } - /** - * Validate topic ACL creation - * @throws InterruptedException Any interrupted exception during ACLs synchronization - * @throws ExecutionException Any execution exception during ACLs synchronization - */ @Test - void createTopicReadACL() throws InterruptedException, ExecutionException { + void createTopicReadAcl() throws InterruptedException, ExecutionException { AccessControlEntry aclTopic = AccessControlEntry.builder() .metadata(ObjectMeta.builder() - .name("ns1-acl-topic") - .namespace("ns1") - .build()) + .name("ns1-acl-topic") + .namespace("ns1") + .build()) .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.READ) - .grantedTo("ns1") - .build()) + .resourceType(ResourceType.TOPIC) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.READ) + .grantedTo("ns1") + .build()) .build(); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); // Force ACLs synchronization accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -120,19 +126,21 @@ void createTopicReadACL() throws InterruptedException, ExecutionException { Admin kafkaClient = getAdminClient(); AclBindingFilter user1Filter = new AclBindingFilter( - ResourcePatternFilter.ANY, - new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); + ResourcePatternFilter.ANY, + new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); Collection results = kafkaClient.describeAcls(user1Filter).values().get(); AclBinding expected = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, + AclPermissionType.ALLOW)); assertEquals(1, results.size()); assertEquals(expected, results.stream().findFirst().get()); // DELETE the ACL and verify - client.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/acls/ns1-acl-topic").bearerAuth(token)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/acls/ns1-acl-topic").bearerAuth(token)); accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -141,66 +149,66 @@ void createTopicReadACL() throws InterruptedException, ExecutionException { assertTrue(results.isEmpty()); } - /** - * Validate public topic ACL creation - * @throws InterruptedException Any interrupted exception during ACLs synchronization - * @throws ExecutionException Any execution exception during ACLs synchronization - */ @Test - void createPublicTopicReadACL() throws InterruptedException, ExecutionException { + void createPublicTopicReadAcl() throws InterruptedException, ExecutionException { AccessControlEntry aclTopicOwner = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ns1-acl-topic-owner") - 
.namespace("ns1") - .build()) - .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("ns1") - .build()) - .build(); - - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopicOwner)); + .metadata(ObjectMeta.builder() + .name("ns1-acl-topic-owner") + .namespace("ns1") + .build()) + .spec(AccessControlEntrySpec.builder() + .resourceType(ResourceType.TOPIC) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("ns1") + .build()) + .build(); + + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopicOwner)); // Force ACLs synchronization accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); AccessControlEntry aclTopicPublic = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ns1-public-acl-topic") - .namespace("ns1") - .build()) - .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.READ) - .grantedTo("*") - .build()) - .build(); - - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopicPublic)); + .metadata(ObjectMeta.builder() + .name("ns1-public-acl-topic") + .namespace("ns1") + .build()) + .spec(AccessControlEntrySpec.builder() + .resourceType(ResourceType.TOPIC) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.READ) + .grantedTo("*") + .build()) + .build(); + + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopicPublic)); // Force ACLs synchronization accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); Admin kafkaClient = getAdminClient(); AclBindingFilter publicFilter = new AclBindingFilter(ResourcePatternFilter.ANY, - new AccessControlEntryFilter("User:*", null, AclOperation.ANY, AclPermissionType.ANY)); + new AccessControlEntryFilter("User:*", null, AclOperation.ANY, AclPermissionType.ANY)); Collection results = kafkaClient.describeAcls(publicFilter).values().get(); AclBinding expected = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:*", "*", AclOperation.READ, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:*", "*", AclOperation.READ, + AclPermissionType.ALLOW)); assertEquals(1, results.size()); assertEquals(expected, results.stream().findFirst().get()); // DELETE the ACLs and verify - client.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/acls/ns1-public-acl-topic").bearerAuth(token)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/acls/ns1-acl-topic-owner").bearerAuth(token)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/acls/ns1-public-acl-topic").bearerAuth(token)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, 
"/api/namespaces/ns1/acls/ns1-acl-topic-owner").bearerAuth(token)); accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -209,70 +217,74 @@ void createPublicTopicReadACL() throws InterruptedException, ExecutionException assertTrue(results.isEmpty()); } - /** - * Validate topic ACL creation when the ACL is already in broker but not in Ns4Kafka - * @throws InterruptedException Any interrupted exception during ACLs synchronization - * @throws ExecutionException Any execution exception during ACLs synchronization - */ @Test - void createTopicACLAlreadyExistsInBroker() throws InterruptedException, ExecutionException { + void createTopicAclAlreadyExistsInBroker() throws InterruptedException, ExecutionException { AccessControlEntry aclTopic = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ns1-acl-topic") - .namespace("ns1") - .build()) - .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.READ) - .grantedTo("ns1") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1-acl-topic") + .namespace("ns1") + .build()) + .spec(AccessControlEntrySpec.builder() + .resourceType(ResourceType.TOPIC) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.READ) + .grantedTo("ns1") + .build()) + .build(); AclBinding aclBinding = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, + AclPermissionType.ALLOW)); Admin kafkaClient = getAdminClient(); kafkaClient.createAcls(Collections.singletonList(aclBinding)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); // Force ACLs synchronization accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); AclBindingFilter user1Filter = new AclBindingFilter( - ResourcePatternFilter.ANY, - new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); + ResourcePatternFilter.ANY, + new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); Collection results = kafkaClient.describeAcls(user1Filter).values().get(); assertEquals(1, results.size()); assertEquals(aclBinding, results.stream().findFirst().get()); + + // Remove ACL + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/acls/ns1-acl-topic") + .bearerAuth(token)); + + accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); + + results = kafkaClient.describeAcls(user1Filter).values().get(); + + assertTrue(results.isEmpty()); } - /** - * Validate connect ACL creation - * @throws InterruptedException Any interrupted exception during ACLs synchronization - * @throws ExecutionException Any execution exception during ACLs synchronization - */ @Test - void createConnectOwnerACL() throws InterruptedException, ExecutionException { + void createConnectOwnerAcl() 
throws InterruptedException, ExecutionException { AccessControlEntry aclTopic = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ns1-acl-connect") - .namespace("ns1") - .build()) - .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.CONNECT) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("ns1") - .build()) - .build(); - - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); + .metadata(ObjectMeta.builder() + .name("ns1-acl-connect") + .namespace("ns1") + .build()) + .spec(AccessControlEntrySpec.builder() + .resourceType(ResourceType.CONNECT) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("ns1") + .build()) + .build(); + + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); //force ACL Sync accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -280,19 +292,22 @@ void createConnectOwnerACL() throws InterruptedException, ExecutionException { Admin kafkaClient = getAdminClient(); AclBindingFilter user1Filter = new AclBindingFilter( - ResourcePatternFilter.ANY, - new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); + ResourcePatternFilter.ANY, + new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); Collection results = kafkaClient.describeAcls(user1Filter).values().get(); AclBinding expected = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.GROUP, "connect-ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.GROUP, "connect-ns1-", + PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, + AclPermissionType.ALLOW)); assertEquals(1, results.size()); assertEquals(expected, results.stream().findFirst().get()); // DELETE the ACL and verify - client.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/acls/ns1-acl-connect").bearerAuth(token)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/acls/ns1-acl-connect").bearerAuth(token)); accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -301,43 +316,40 @@ void createConnectOwnerACL() throws InterruptedException, ExecutionException { assertTrue(results.isEmpty()); } - /** - * Validate Kafka Stream ACL creation - * @throws InterruptedException Any interrupted exception during ACLs synchronization - * @throws ExecutionException Any execution exception during ACLs synchronization - */ @Test - void createStreamACL() throws InterruptedException, ExecutionException { + void createStreamAcl() throws InterruptedException, ExecutionException { AccessControlEntry aclTopic = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ns1-acl-topic") - .namespace("ns1") - .build()) - .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("ns1") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1-acl-topic") + 
.namespace("ns1") + .build()) + .spec(AccessControlEntrySpec.builder() + .resourceType(ResourceType.TOPIC) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("ns1") + .build()) + .build(); AccessControlEntry aclGroup = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ns1-acl-group") - .namespace("ns1") - .build()) - .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.GROUP) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("ns1") - .build()) - .build(); - - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclGroup)); + .metadata(ObjectMeta.builder() + .name("ns1-acl-group") + .namespace("ns1") + .build()) + .spec(AccessControlEntrySpec.builder() + .resourceType(ResourceType.GROUP) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("ns1") + .build()) + .build(); + + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclGroup)); //force ACL Sync accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -345,8 +357,8 @@ void createStreamACL() throws InterruptedException, ExecutionException { Admin kafkaClient = getAdminClient(); AclBindingFilter user1Filter = new AclBindingFilter( - ResourcePatternFilter.ANY, - new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); + ResourcePatternFilter.ANY, + new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); Collection results = kafkaClient.describeAcls(user1Filter).values().get(); // Topic ns1- READ @@ -354,29 +366,34 @@ void createStreamACL() throws InterruptedException, ExecutionException { // Group ns1- READ AclBinding ac1 = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, + AclPermissionType.ALLOW)); AclBinding ac2 = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.WRITE, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.WRITE, + AclPermissionType.ALLOW)); AclBinding ac3 = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.GROUP, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.GROUP, "ns1-", PatternType.PREFIXED), + new 
org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.READ, + AclPermissionType.ALLOW)); AclBinding ac4 = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.DESCRIBE_CONFIGS, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-", PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.DESCRIBE_CONFIGS, + AclPermissionType.ALLOW)); assertEquals(4, results.size()); assertTrue(results.containsAll(List.of(ac1, ac2, ac3, ac4))); KafkaStream stream = KafkaStream.builder() - .metadata(ObjectMeta.builder() - .name("ns1-stream1") - .namespace("ns1") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1-stream1") + .namespace("ns1") + .build()) + .build(); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/streams").bearerAuth(token).body(stream)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/streams").bearerAuth(token).body(stream)); // Force ACLs synchronization accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -389,22 +406,31 @@ void createStreamACL() throws InterruptedException, ExecutionException { // Topic ns1-stream1 CREATE // Topic ns1-stream1 DELETE AclBinding ac5 = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-stream1", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.CREATE, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-stream1", + PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.CREATE, + AclPermissionType.ALLOW)); AclBinding ac6 = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-stream1", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.DELETE, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TOPIC, "ns1-stream1", + PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.DELETE, + AclPermissionType.ALLOW)); AclBinding ac7 = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID, "ns1-stream1", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.WRITE, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID, "ns1-stream1", + PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.WRITE, + AclPermissionType.ALLOW)); assertEquals(7, results.size()); assertTrue(results.containsAll(List.of(ac1, ac2, ac3, ac4, ac5, ac6, ac7))); // DELETE the Stream & ACL and verify - client.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/streams/ns1-stream1").bearerAuth(token)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/acls/ns1-acl-topic").bearerAuth(token)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/acls/ns1-acl-group").bearerAuth(token)); 
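For reference, the pattern these ACL integration tests exercise is: create an ACL through the ns4kafka API, force synchronization, then build the expected org.apache.kafka.common.acl.AclBinding and compare it with what Admin#describeAcls returns. A minimal standalone sketch of that broker-side round trip, assuming a hypothetical broker at localhost:9092 and the principal User:user1 (both placeholders for illustration, not values taken from this patch):

import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;

public class AclRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder address; the tests above obtain it from the Testcontainers Kafka broker.
        try (Admin admin = Admin.create(Map.<String, Object>of("bootstrap.servers", "localhost:9092"))) {
            // Expected binding: READ on every topic prefixed "ns1-", granted to User:user1.
            AclBinding expected = new AclBinding(
                new ResourcePattern(ResourceType.TOPIC, "ns1-", PatternType.PREFIXED),
                new AccessControlEntry("User:user1", "*", AclOperation.READ, AclPermissionType.ALLOW));
            admin.createAcls(List.of(expected)).all().get();

            // Read back everything granted to User:user1, mirroring the user1Filter used in the tests.
            AclBindingFilter user1Filter = new AclBindingFilter(
                ResourcePatternFilter.ANY,
                new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY));
            Collection<AclBinding> results = admin.describeAcls(user1Filter).values().get();
            System.out.println(results.contains(expected)); // true once the ACL exists on the broker
        }
    }
}

The one moving part this sketch leaves out is ns4kafka's AccessControlEntryAsyncExecutor, which the tests force-run ("Force ACLs synchronization") to push pending ACLs to the broker before asserting.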
+ client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/streams/ns1-stream1").bearerAuth(token)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/acls/ns1-acl-topic").bearerAuth(token)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/acls/ns1-acl-group").bearerAuth(token)); accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -414,22 +440,23 @@ void createStreamACL() throws InterruptedException, ExecutionException { } @Test - void shouldCreateTransactionalIDOwnerACL() throws InterruptedException, ExecutionException { + void shouldCreateTransactionalIdOwnerAcl() throws InterruptedException, ExecutionException { AccessControlEntry aclTopic = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("ns1-acl-transactional-id") - .namespace("ns1") - .build()) - .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TRANSACTIONAL_ID) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("ns1") - .build()) - .build(); - - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); + .metadata(ObjectMeta.builder() + .name("ns1-acl-transactional-id") + .namespace("ns1") + .build()) + .spec(AccessControlEntrySpec.builder() + .resourceType(ResourceType.TRANSACTIONAL_ID) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("ns1") + .build()) + .build(); + + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic)); // Force ACL Sync accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); @@ -437,23 +464,28 @@ void shouldCreateTransactionalIDOwnerACL() throws InterruptedException, Executio Admin kafkaClient = getAdminClient(); AclBindingFilter user1Filter = new AclBindingFilter(ResourcePatternFilter.ANY, - new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); + new AccessControlEntryFilter("User:user1", null, AclOperation.ANY, AclPermissionType.ANY)); Collection results = kafkaClient.describeAcls(user1Filter).values().get(); AclBinding expected = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.WRITE, AclPermissionType.ALLOW)); + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID, "ns1-", + PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.WRITE, + AclPermissionType.ALLOW)); AclBinding expected2 = new AclBinding( - new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID, "ns1-", PatternType.PREFIXED), - new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)); - + new ResourcePattern(org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID, "ns1-", + PatternType.PREFIXED), + new org.apache.kafka.common.acl.AccessControlEntry("User:user1", "*", AclOperation.DESCRIBE, + AclPermissionType.ALLOW)); assertEquals(2, results.size()); assertTrue(results.containsAll(List.of(expected, expected2))); - + // DELETE the ACL and verify - 
client.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/acls/ns1-acl-transactional-id").bearerAuth(token)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/acls/ns1-acl-transactional-id") + .bearerAuth(token)); accessControlEntryAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); diff --git a/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java b/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java index 894c1b42..8117edb1 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java @@ -1,5 +1,7 @@ package com.michelin.ns4kafka.integration; +import static org.junit.jupiter.api.Assertions.assertEquals; + import com.michelin.ns4kafka.controllers.ApiResourcesController; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; @@ -15,12 +17,12 @@ import io.micronaut.security.authentication.UsernamePasswordCredentials; import io.micronaut.test.extensions.junit5.annotation.MicronautTest; import jakarta.inject.Inject; -import org.junit.jupiter.api.Test; - import java.util.List; +import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.assertEquals; - +/** + * Api resources test. + */ @MicronautTest @Property(name = "micronaut.security.gitlab.enabled", value = "false") class ApiResourcesTest extends AbstractIntegrationTest { @@ -31,12 +33,13 @@ class ApiResourcesTest extends AbstractIntegrationTest { @Test void asAdmin() { UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); - HttpResponse response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class); + HttpResponse response = client.toBlocking() + .exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class); String token = response.getBody().get().getAccessToken(); List resources = client.toBlocking().retrieve( - HttpRequest.GET("/api-resources").bearerAuth(token), - Argument.listOf(ApiResourcesController.ResourceDefinition.class)); + HttpRequest.GET("/api-resources").bearerAuth(token), + Argument.listOf(ApiResourcesController.ResourceDefinition.class)); assertEquals(9, resources.size()); } @@ -46,8 +49,8 @@ void asAnonymous() { // This feature is not about restricting access, but easing user experience within the CLI // If the user is not authenticated, show everything List resources = client.toBlocking().retrieve( - HttpRequest.GET("/api-resources"), - Argument.listOf(ApiResourcesController.ResourceDefinition.class)); + HttpRequest.GET("/api-resources"), + Argument.listOf(ApiResourcesController.ResourceDefinition.class)); assertEquals(9, resources.size()); } @@ -55,48 +58,52 @@ void asAnonymous() { @Test void asUser() { Namespace ns1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1") - .cluster("test-cluster") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .kafkaUser("user1") - .topicValidator(TopicValidator.makeDefaultOneBroker()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1") + .cluster("test-cluster") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .kafkaUser("user1") + .topicValidator(TopicValidator.makeDefaultOneBroker()) + .build()) + .build(); RoleBinding rb1 = RoleBinding.builder() - .metadata(ObjectMeta.builder() - .name("ns1-rb") - .namespace("ns1") - .build()) - 
diff --git a/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java b/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java
index 894c1b42..8117edb1 100644
--- a/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java
+++ b/src/test/java/com/michelin/ns4kafka/integration/ApiResourcesTest.java
@@ -1,5 +1,7 @@
 package com.michelin.ns4kafka.integration;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import com.michelin.ns4kafka.controllers.ApiResourcesController;
 import com.michelin.ns4kafka.models.Namespace;
 import com.michelin.ns4kafka.models.ObjectMeta;
@@ -15,12 +17,12 @@
 import io.micronaut.security.authentication.UsernamePasswordCredentials;
 import io.micronaut.test.extensions.junit5.annotation.MicronautTest;
 import jakarta.inject.Inject;
-import org.junit.jupiter.api.Test;
-
 import java.util.List;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
+/**
+ * Api resources test.
+ */
 @MicronautTest
 @Property(name = "micronaut.security.gitlab.enabled", value = "false")
 class ApiResourcesTest extends AbstractIntegrationTest {
@@ -31,12 +33,13 @@ class ApiResourcesTest extends AbstractIntegrationTest {
     @Test
     void asAdmin() {
         UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin");
-        HttpResponse<TopicTest.BearerAccessRefreshToken> response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class);
+        HttpResponse<TopicTest.BearerAccessRefreshToken> response = client.toBlocking()
+            .exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class);
         String token = response.getBody().get().getAccessToken();
 
         List<ApiResourcesController.ResourceDefinition> resources = client.toBlocking().retrieve(
-                HttpRequest.GET("/api-resources").bearerAuth(token),
-                Argument.listOf(ApiResourcesController.ResourceDefinition.class));
+            HttpRequest.GET("/api-resources").bearerAuth(token),
+            Argument.listOf(ApiResourcesController.ResourceDefinition.class));
 
         assertEquals(9, resources.size());
     }
@@ -46,8 +49,8 @@ void asAnonymous() {
         // This feature is not about restricting access, but easing user experience within the CLI
         // If the user is not authenticated, show everything
         List<ApiResourcesController.ResourceDefinition> resources = client.toBlocking().retrieve(
-                HttpRequest.GET("/api-resources"),
-                Argument.listOf(ApiResourcesController.ResourceDefinition.class));
+            HttpRequest.GET("/api-resources"),
+            Argument.listOf(ApiResourcesController.ResourceDefinition.class));
 
         assertEquals(9, resources.size());
     }
@@ -55,48 +58,52 @@ void asAnonymous() {
     @Test
     void asUser() {
         Namespace ns1 = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1")
-                        .cluster("test-cluster")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .topicValidator(TopicValidator.makeDefaultOneBroker())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1")
+                .cluster("test-cluster")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .topicValidator(TopicValidator.makeDefaultOneBroker())
+                .build())
+            .build();
 
         RoleBinding rb1 = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-rb")
-                        .namespace("ns1")
-                        .build())
-                .spec(RoleBinding.RoleBindingSpec.builder()
-                        .role(RoleBinding.Role.builder()
-                                .resourceTypes(List.of("topics", "acls"))
-                                .verbs(List.of(RoleBinding.Verb.POST, RoleBinding.Verb.GET))
-                                .build())
-                        .subject(RoleBinding.Subject.builder()
-                                .subjectName("userGroup")
-                                .subjectType(RoleBinding.SubjectType.GROUP)
-                                .build())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-rb")
+                .namespace("ns1")
+                .build())
+            .spec(RoleBinding.RoleBindingSpec.builder()
+                .role(RoleBinding.Role.builder()
+                    .resourceTypes(List.of("topics", "acls"))
+                    .verbs(List.of(RoleBinding.Verb.POST, RoleBinding.Verb.GET))
+                    .build())
+                .subject(RoleBinding.Subject.builder()
+                    .subjectName("userGroup")
+                    .subjectType(RoleBinding.SubjectType.GROUP)
+                    .build())
+                .build())
+            .build();
 
         UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin");
-        HttpResponse<TopicTest.BearerAccessRefreshToken> response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class);
+        HttpResponse<TopicTest.BearerAccessRefreshToken> response = client.toBlocking()
+            .exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class);
         String token = response.getBody().get().getAccessToken();
 
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1));
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1));
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1));
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1));
 
         UsernamePasswordCredentials userCredentials = new UsernamePasswordCredentials("user", "admin");
-        HttpResponse<TopicTest.BearerAccessRefreshToken> userResponse = client.toBlocking().exchange(HttpRequest.POST("/login", userCredentials), TopicTest.BearerAccessRefreshToken.class);
+        HttpResponse<TopicTest.BearerAccessRefreshToken> userResponse = client.toBlocking()
+            .exchange(HttpRequest.POST("/login", userCredentials), TopicTest.BearerAccessRefreshToken.class);
         String userToken = userResponse.getBody().get().getAccessToken();
 
         List<ApiResourcesController.ResourceDefinition> resources = client.toBlocking().retrieve(
-                HttpRequest.GET("/api-resources").bearerAuth(userToken),
-                Argument.listOf(ApiResourcesController.ResourceDefinition.class));
+            HttpRequest.GET("/api-resources").bearerAuth(userToken),
+            Argument.listOf(ApiResourcesController.ResourceDefinition.class));
 
         assertEquals(2, resources.size());
     }
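Every test in this file follows the same authentication flow: post UsernamePasswordCredentials to /login, unwrap the access token from the response body, then replay it with bearerAuth on each subsequent request. A compact sketch of that flow, assuming the TopicTest.BearerAccessRefreshToken holder the tests already declare and a class in the same package; the helper itself is hypothetical.

import io.micronaut.http.HttpRequest;
import io.micronaut.http.client.BlockingHttpClient;
import io.micronaut.security.authentication.UsernamePasswordCredentials;

// Hypothetical helper mirroring the login pattern used across the integration tests.
// Assumes it lives in the same package as TopicTest so the token holder resolves.
class LoginSketch {
    static String login(BlockingHttpClient client, String user, String password) {
        UsernamePasswordCredentials credentials = new UsernamePasswordCredentials(user, password);
        // A non-2xx response would surface as an HttpClientResponseException here.
        return client.exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class)
            .getBody()
            .orElseThrow()
            .getAccessToken();
    }
}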
diff --git a/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java b/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java
index 4d49b032..681b4ba2 100644
--- a/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java
+++ b/src/test/java/com/michelin/ns4kafka/integration/ConnectTest.java
@@ -1,13 +1,25 @@
 package com.michelin.ns4kafka.integration;
 
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 import com.michelin.ns4kafka.integration.TopicTest.BearerAccessRefreshToken;
-import com.michelin.ns4kafka.models.*;
+import com.michelin.ns4kafka.models.AccessControlEntry;
 import com.michelin.ns4kafka.models.AccessControlEntry.AccessControlEntrySpec;
 import com.michelin.ns4kafka.models.AccessControlEntry.Permission;
 import com.michelin.ns4kafka.models.AccessControlEntry.ResourcePatternType;
 import com.michelin.ns4kafka.models.AccessControlEntry.ResourceType;
+import com.michelin.ns4kafka.models.Namespace;
 import com.michelin.ns4kafka.models.Namespace.NamespaceSpec;
-import com.michelin.ns4kafka.models.RoleBinding.*;
+import com.michelin.ns4kafka.models.ObjectMeta;
+import com.michelin.ns4kafka.models.RoleBinding;
+import com.michelin.ns4kafka.models.RoleBinding.Role;
+import com.michelin.ns4kafka.models.RoleBinding.RoleBindingSpec;
+import com.michelin.ns4kafka.models.RoleBinding.Subject;
+import com.michelin.ns4kafka.models.RoleBinding.SubjectType;
+import com.michelin.ns4kafka.models.RoleBinding.Verb;
+import com.michelin.ns4kafka.models.Topic;
 import com.michelin.ns4kafka.models.connector.ChangeConnectorState;
 import com.michelin.ns4kafka.models.connector.Connector;
 import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorInfo;
@@ -28,18 +40,15 @@
 import io.micronaut.security.authentication.UsernamePasswordCredentials;
 import io.micronaut.test.extensions.junit5.annotation.MicronautTest;
 import jakarta.inject.Inject;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-import reactor.core.publisher.Flux;
-
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-
-import static org.junit.jupiter.api.Assertions.*;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Flux;
 
 @MicronautTest
 @Property(name = "micronaut.security.gitlab.enabled", value = "false")
@@ -59,82 +68,85 @@ class ConnectTest extends AbstractIntegrationConnectTest {
     @BeforeAll
     void init() {
         Namespace ns1 = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1")
-                        .cluster("test-cluster")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .connectClusters(List.of("test-connect"))
-                        .topicValidator(TopicValidator.makeDefaultOneBroker())
-                        .connectValidator(ConnectValidator.builder()
-                                .validationConstraints(Map.of())
-                                .sinkValidationConstraints(Map.of())
-                                .classValidationConstraints(Map.of())
-                                .build())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1")
+                .cluster("test-cluster")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .connectClusters(List.of("test-connect"))
+                .topicValidator(TopicValidator.makeDefaultOneBroker())
+                .connectValidator(ConnectValidator.builder()
+                    .validationConstraints(Map.of())
+                    .sinkValidationConstraints(Map.of())
+                    .classValidationConstraints(Map.of())
+                    .build())
+                .build())
+            .build();
 
         RoleBinding rb1 = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-rb")
-                        .namespace("ns1")
-                        .build())
-                .spec(RoleBindingSpec.builder()
-                        .role(Role.builder()
-                                .resourceTypes(List.of("topics", "acls"))
-                                .verbs(List.of(Verb.POST, Verb.GET))
-                                .build())
-                        .subject(Subject.builder()
-                                .subjectName("group1")
-                                .subjectType(SubjectType.GROUP)
-                                .build())
-                        .build())
-                .build();
-
-        AccessControlEntry aclConnect = AccessControlEntry.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-acl")
-                        .namespace("ns1")
-                        .build())
-                .spec(AccessControlEntrySpec.builder()
-                        .resourceType(ResourceType.CONNECT)
-                        .resource("ns1-")
-                        .resourcePatternType(ResourcePatternType.PREFIXED)
-                        .permission(Permission.OWNER)
-                        .grantedTo("ns1")
-                        .build())
-                .build();
-
-        AccessControlEntry aclTopic = AccessControlEntry.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-acl-topic")
-                        .namespace("ns1")
-                        .build())
-                .spec(AccessControlEntrySpec.builder()
-                        .resourceType(ResourceType.TOPIC)
-                        .resource("ns1-")
-                        .resourcePatternType(ResourcePatternType.PREFIXED)
-                        .permission(Permission.OWNER)
-                        .grantedTo("ns1")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-rb")
+                .namespace("ns1")
+                .build())
+            .spec(RoleBindingSpec.builder()
+                .role(Role.builder()
+                    .resourceTypes(List.of("topics", "acls"))
+                    .verbs(List.of(Verb.POST, Verb.GET))
+                    .build())
+                .subject(Subject.builder()
+                    .subjectName("group1")
+                    .subjectType(SubjectType.GROUP)
+                    .build())
+                .build())
+            .build();
 
         UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin");
-        HttpResponse<BearerAccessRefreshToken> response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class);
+        HttpResponse<BearerAccessRefreshToken> response =
+            client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class);
 
         token = response.getBody().get().getAccessToken();
 
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1));
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1));
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclConnect));
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic));
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1));
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1));
+
+        AccessControlEntry aclConnect = AccessControlEntry.builder()
+            .metadata(ObjectMeta.builder()
+                .name("ns1-acl")
+                .namespace("ns1")
+                .build())
+            .spec(AccessControlEntrySpec.builder()
+                .resourceType(ResourceType.CONNECT)
+                .resource("ns1-")
+                .resourcePatternType(ResourcePatternType.PREFIXED)
+                .permission(Permission.OWNER)
+                .grantedTo("ns1")
+                .build())
+            .build();
+
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclConnect));
+
+        AccessControlEntry aclTopic = AccessControlEntry.builder()
+            .metadata(ObjectMeta.builder()
+                .name("ns1-acl-topic")
+                .namespace("ns1")
+                .build())
+            .spec(AccessControlEntrySpec.builder()
+                .resourceType(ResourceType.TOPIC)
+                .resource("ns1-")
+                .resourcePatternType(ResourcePatternType.PREFIXED)
+                .permission(Permission.OWNER)
+                .grantedTo("ns1")
+                .build())
+            .build();
+
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclTopic));
     }
 
-    /**
-     * Validate connector HTTP client creation
-     * @throws MalformedURLException Any malformed URL exception
-     */
     @Test
     void createConnect() throws MalformedURLException {
         HttpClient connectCli = HttpClient.create(new URL(connect.getUrl()));
@@ -142,48 +154,33 @@ void createConnect() throws MalformedURLException {
         assertEquals("7.4.1-ccs", actual.version());
     }
 
-    /**
-     * Validate the namespace creation without connector
-     */
     @Test
     void createNamespaceWithoutConnect() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns-without-connect")
-                        .cluster("test-cluster")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .kafkaUser("user-without-connect")
-                        .topicValidator(TopicValidator.makeDefaultOneBroker())
-                        .build())
-                .build();
-
-        assertDoesNotThrow(() -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns)));
+            .metadata(ObjectMeta.builder()
+                .name("ns-without-connect")
+                .cluster("test-cluster")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .kafkaUser("user-without-connect")
+                .topicValidator(TopicValidator.makeDefaultOneBroker())
+                .build())
+            .build();
+
+        assertDoesNotThrow(() -> client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns)));
     }
 
     /**
      * Validate connector deployment
      * Deploy a topic and connectors and assert the deployments worked.
      * The test asserts null/empty connector properties are deployed.
-     * @throws InterruptedException Any interrupted exception
+     *
+     * @throws InterruptedException  Any interrupted exception
      * @throws MalformedURLException Any malformed URL exception
      */
     @Test
     void deployConnectors() throws InterruptedException, MalformedURLException {
-        Topic to = Topic.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-to1")
-                        .namespace("ns1")
-                        .build())
-                .spec(Topic.TopicSpec.builder()
-                        .partitions(3)
-                        .replicationFactor(1)
-                        .configs(Map.of("cleanup.policy", "delete",
-                                "min.insync.replicas", "1",
-                                "retention.ms", "60000"))
-                        .build())
-                .build();
-
         Map<String, String> connectorSpecs = new HashMap<>();
         connectorSpecs.put("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector");
         connectorSpecs.put("tasks.max", "1");
@@ -191,53 +188,76 @@ void deployConnectors() throws InterruptedException, MalformedURLException {
         connectorSpecs.put("file", null);
 
         Connector connectorWithNullParameter = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-connectorWithNullParameter")
-                        .namespace("ns1")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("test-connect")
-                        .config(connectorSpecs)
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-connectorWithNullParameter")
+                .namespace("ns1")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("test-connect")
+                .config(connectorSpecs)
+                .build())
+            .build();
+
+        Topic topic = Topic.builder()
+            .metadata(ObjectMeta.builder()
+                .name("ns1-to1")
+                .namespace("ns1")
+                .build())
+            .spec(Topic.TopicSpec.builder()
+                .partitions(3)
+                .replicationFactor(1)
+                .configs(Map.of("cleanup.policy", "delete",
+                    "min.insync.replicas", "1",
+                    "retention.ms", "60000"))
+                .build())
+            .build();
+
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(topic));
+        topicAsyncExecutorList.forEach(TopicAsyncExecutor::run);
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token)
+                .body(connectorWithNullParameter));
 
         Connector connectorWithEmptyParameter = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-connectorWithEmptyParameter")
-                        .namespace("ns1")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("test-connect")
-                        .config(Map.of(
-                                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                                "tasks.max", "1",
-                                "topics", "ns1-to1",
-                                "file", ""
-                        ))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-connectorWithEmptyParameter")
+                .namespace("ns1")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("test-connect")
+                .config(Map.of(
+                    "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+                    "tasks.max", "1",
+                    "topics", "ns1-to1",
+                    "file", ""
+                ))
+                .build())
+            .build();
+
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token)
+                .body(connectorWithEmptyParameter));
 
         Connector connectorWithFillParameter = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-connectorWithFillParameter")
-                        .namespace("ns1")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("test-connect")
-                        .config(Map.of(
-                                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                                "tasks.max", "1",
-                                "topics", "ns1-to1",
-                                "file", "test"
-                        ))
-                        .build())
-                .build();
-
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to));
-        topicAsyncExecutorList.forEach(TopicAsyncExecutor::run);
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(connectorWithNullParameter));
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(connectorWithEmptyParameter));
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(connectorWithFillParameter));
+            .metadata(ObjectMeta.builder()
+                .name("ns1-connectorWithFillParameter")
+                .namespace("ns1")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("test-connect")
+                .config(Map.of(
+                    "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+                    "tasks.max", "1",
+                    "topics", "ns1-to1",
+                    "file", "test"
+                ))
+                .build())
+            .build();
+
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token)
+                .body(connectorWithFillParameter));
 
         Flux.fromIterable(connectorAsyncExecutorList).flatMap(ConnectorAsyncExecutor::runHealthCheck).subscribe();
         Flux.fromIterable(connectorAsyncExecutorList).flatMap(ConnectorAsyncExecutor::run).subscribe();
@@ -245,52 +265,40 @@ void deployConnectors() throws InterruptedException, MalformedURLException {
         Thread.sleep(2000);
 
         HttpClient connectCli = HttpClient.create(new URL(connect.getUrl()));
-        ConnectorInfo actualConnectorWithNullParameter = connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-connectorWithNullParameter"), ConnectorInfo.class);
-        ConnectorInfo actualConnectorWithEmptyParameter = connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-connectorWithEmptyParameter"), ConnectorInfo.class);
-        ConnectorInfo actualConnectorWithFillParameter = connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-connectorWithFillParameter"), ConnectorInfo.class);
 
         // "File" property is present, but null
+        ConnectorInfo actualConnectorWithNullParameter = connectCli.toBlocking()
+            .retrieve(HttpRequest.GET("/connectors/ns1-connectorWithNullParameter"), ConnectorInfo.class);
+
         assertTrue(actualConnectorWithNullParameter.config().containsKey("file"));
         Assertions.assertNull(actualConnectorWithNullParameter.config().get("file"));
 
         // "File" property is present, but empty
+        ConnectorInfo actualConnectorWithEmptyParameter = connectCli.toBlocking()
+            .retrieve(HttpRequest.GET("/connectors/ns1-connectorWithEmptyParameter"), ConnectorInfo.class);
+
         assertTrue(actualConnectorWithEmptyParameter.config().containsKey("file"));
         assertTrue(actualConnectorWithEmptyParameter.config().get("file").isEmpty());
 
         // "File" property is present
+        ConnectorInfo actualConnectorWithFillParameter = connectCli.toBlocking()
+            .retrieve(HttpRequest.GET("/connectors/ns1-connectorWithFillParameter"), ConnectorInfo.class);
+
         assertTrue(actualConnectorWithFillParameter.config().containsKey("file"));
         assertEquals("test", actualConnectorWithFillParameter.config().get("file"));
     }
 
-    /**
-     * Validate connector update when connector is already deployed
-     * in the cluster and it is updated with a null property
-     * @throws InterruptedException Any interrupted exception
-     * @throws MalformedURLException Any malformed URL exception
-     */
     @Test
     void updateConnectorsWithNullProperty() throws InterruptedException, MalformedURLException {
-        Topic to = Topic.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-to1")
-                        .namespace("ns1")
-                        .build())
-                .spec(Topic.TopicSpec.builder()
-                        .partitions(3)
-                        .replicationFactor(1)
-                        .configs(Map.of("cleanup.policy", "delete",
-                                "min.insync.replicas", "1",
-                                "retention.ms", "60000"))
-                        .build())
-                .build();
+
         ConnectorSpecs connectorSpecs = ConnectorSpecs.builder()
-                .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                        "tasks.max", "1",
-                        "topics", "ns1-to1",
-                        "file", "test"
-                ))
-                .build();
+            .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+                "tasks.max", "1",
+                "topics", "ns1-to1",
+                "file", "test"
+            ))
+            .build();
 
         Map<String, String> updatedConnectorSpecs = new HashMap<>();
         updatedConnectorSpecs.put("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector");
@@ -298,79 +306,98 @@ void updateConnectorsWithNullProperty() throws InterruptedException, MalformedUR
         updatedConnectorSpecs.put("topics", "ns1-to1");
         updatedConnectorSpecs.put("file", null);
 
-        Connector updateConnector = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-connector")
-                        .namespace("ns1")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("test-connect")
-                        .config(updatedConnectorSpecs)
-                        .build())
-                .build();
-
         HttpClient connectCli = HttpClient.create(new URL(connect.getUrl()));
-        HttpResponse<ConnectorInfo> connectorInfo = connectCli.toBlocking().exchange(HttpRequest.PUT("/connectors/ns1-connector/config", connectorSpecs), ConnectorInfo.class);
+        HttpResponse<ConnectorInfo> connectorInfo = connectCli.toBlocking()
+            .exchange(HttpRequest.PUT("/connectors/ns1-connector/config", connectorSpecs), ConnectorInfo.class);
 
         // "File" property is present and fill
         assertTrue(connectorInfo.getBody().isPresent());
         assertTrue(connectorInfo.getBody().get().config().containsKey("file"));
         assertEquals("test", connectorInfo.getBody().get().config().get("file"));
 
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to));
+        Topic topic = Topic.builder()
+            .metadata(ObjectMeta.builder()
+                .name("ns1-to1")
+                .namespace("ns1")
+                .build())
+            .spec(Topic.TopicSpec.builder()
+                .partitions(3)
+                .replicationFactor(1)
+                .configs(Map.of("cleanup.policy", "delete",
+                    "min.insync.replicas", "1",
+                    "retention.ms", "60000"))
+                .build())
+            .build();
+
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(topic));
+
         topicAsyncExecutorList.forEach(TopicAsyncExecutor::run);
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(updateConnector));
+
+        Connector updateConnector = Connector.builder()
+            .metadata(ObjectMeta.builder()
+                .name("ns1-connector")
+                .namespace("ns1")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("test-connect")
+                .config(updatedConnectorSpecs)
+                .build())
+            .build();
+
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token)
+                .body(updateConnector));
 
         Flux.fromIterable(connectorAsyncExecutorList).flatMap(ConnectorAsyncExecutor::runHealthCheck).subscribe();
         Flux.fromIterable(connectorAsyncExecutorList).flatMap(ConnectorAsyncExecutor::run).subscribe();
 
         Thread.sleep(2000);
 
-        ConnectorInfo actualConnector = connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-connector"), ConnectorInfo.class);
+        ConnectorInfo actualConnector =
+            connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-connector"), ConnectorInfo.class);
 
         // "File" property is present, but null
         assertTrue(actualConnector.config().containsKey("file"));
         Assertions.assertNull(actualConnector.config().get("file"));
     }
 
-    /**
-     * Validate the connector restart
-     * @throws InterruptedException Any interrupted exception
-     */
     @Test
     void restartConnector() throws InterruptedException {
         Topic to = Topic.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-to1")
-                        .namespace("ns1")
-                        .build())
-                .spec(Topic.TopicSpec.builder()
-                        .partitions(3)
-                        .replicationFactor(1)
-                        .configs(Map.of("cleanup.policy", "delete",
-                                "min.insync.replicas", "1",
-                                "retention.ms", "60000"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-to1")
+                .namespace("ns1")
+                .build())
+            .spec(Topic.TopicSpec.builder()
+                .partitions(3)
+                .replicationFactor(1)
+                .configs(Map.of("cleanup.policy", "delete",
+                    "min.insync.replicas", "1",
+                    "retention.ms", "60000"))
+                .build())
+            .build();
 
         Connector co = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-co1")
-                        .namespace("ns1")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("test-connect")
-                        .config(Map.of(
-                                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                                "tasks.max", "1",
-                                "topics", "ns1-to1"
-                        ))
-                        .build())
-                .build();
-
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to));
+            .metadata(ObjectMeta.builder()
+                .name("ns1-co1")
+                .namespace("ns1")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("test-connect")
+                .config(Map.of(
+                    "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+                    "tasks.max", "1",
+                    "topics", "ns1-to1"
+                ))
+                .build())
+            .build();
+
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to));
         topicAsyncExecutorList.forEach(TopicAsyncExecutor::run);
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(co));
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(co));
 
         Flux.fromIterable(connectorAsyncExecutorList).flatMap(ConnectorAsyncExecutor::runHealthCheck).subscribe();
         Flux.fromIterable(connectorAsyncExecutorList).flatMap(ConnectorAsyncExecutor::run).subscribe();
@@ -378,54 +405,53 @@ void restartConnector() throws InterruptedException {
         Thread.sleep(2000);
 
         ChangeConnectorState restartState = ChangeConnectorState.builder()
-                .metadata(ObjectMeta.builder().name("ns1-co1").build())
-                .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.restart).build())
-                .build();
-
-        HttpResponse<ChangeConnectorState> actual = client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co1/change-state").bearerAuth(token).body(restartState), ChangeConnectorState.class);
+            .metadata(ObjectMeta.builder().name("ns1-co1").build())
+            .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder()
+                .action(ChangeConnectorState.ConnectorAction.restart).build())
+            .build();
+
+        HttpResponse<ChangeConnectorState> actual = client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co1/change-state").bearerAuth(token)
+                .body(restartState), ChangeConnectorState.class);
         assertEquals(HttpStatus.OK, actual.status());
     }
 
-    /**
-     * Validate connector pause and resume
-     * @throws MalformedURLException Any malformed URL exception
-     * @throws InterruptedException Any interrupted exception
-     */
     @Test
     void pauseAndResumeConnector() throws MalformedURLException, InterruptedException {
-        HttpClient connectCli = HttpClient.create(new URL(connect.getUrl()));
         Topic to = Topic.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-to1")
-                        .namespace("ns1")
-                        .build())
-                .spec(Topic.TopicSpec.builder()
-                        .partitions(3)
-                        .replicationFactor(1)
-                        .configs(Map.of("cleanup.policy", "delete",
-                                "min.insync.replicas", "1",
-                                "retention.ms", "60000"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-to1")
+                .namespace("ns1")
+                .build())
+            .spec(Topic.TopicSpec.builder()
+                .partitions(3)
+                .replicationFactor(1)
+                .configs(Map.of("cleanup.policy", "delete",
+                    "min.insync.replicas", "1",
+                    "retention.ms", "60000"))
+                .build())
+            .build();
 
         Connector co = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-co2")
-                        .namespace("ns1")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("test-connect")
-                        .config(Map.of(
-                                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                                "tasks.max", "3",
-                                "topics", "ns1-to1"
-                        ))
-                        .build())
-                .build();
-
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to));
+            .metadata(ObjectMeta.builder()
+                .name("ns1-co2")
+                .namespace("ns1")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("test-connect")
+                .config(Map.of(
+                    "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+                    "tasks.max", "3",
+                    "topics", "ns1-to1"
+                ))
+                .build())
+            .build();
+
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(to));
         topicAsyncExecutorList.forEach(TopicAsyncExecutor::run);
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(co));
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors").bearerAuth(token).body(co));
 
         Flux.fromIterable(connectorAsyncExecutorList).flatMap(ConnectorAsyncExecutor::runHealthCheck).subscribe();
         Flux.fromIterable(connectorAsyncExecutorList).flatMap(ConnectorAsyncExecutor::run).subscribe();
@@ -434,15 +460,19 @@ void pauseAndResumeConnector() throws MalformedURLException, InterruptedExceptio
 
         // pause the connector
         ChangeConnectorState pauseState = ChangeConnectorState.builder()
-                .metadata(ObjectMeta.builder().name("ns1-co2").build())
-                .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.pause).build())
-                .build();
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co2/change-state").bearerAuth(token).body(pauseState));
+            .metadata(ObjectMeta.builder().name("ns1-co2").build())
+            .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder()
+                .action(ChangeConnectorState.ConnectorAction.pause).build())
+            .build();
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co2/change-state").bearerAuth(token)
+                .body(pauseState));
 
         Thread.sleep(2000);
 
-        // verify paused directly on connect cluster
-        ConnectorStateInfo actual = connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-co2/status"), ConnectorStateInfo.class);
+        HttpClient connectCli = HttpClient.create(new URL(connect.getUrl()));
+        ConnectorStateInfo actual =
+            connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-co2/status"), ConnectorStateInfo.class);
         assertEquals("PAUSED", actual.connector().getState());
         assertEquals("PAUSED", actual.tasks().get(0).getState());
         assertEquals("PAUSED", actual.tasks().get(1).getState());
@@ -450,15 +480,19 @@ void pauseAndResumeConnector() throws MalformedURLException, InterruptedExceptio
 
         // resume the connector
         ChangeConnectorState resumeState = ChangeConnectorState.builder()
-                .metadata(ObjectMeta.builder().name("ns1-co2").build())
-                .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder().action(ChangeConnectorState.ConnectorAction.resume).build())
-                .build();
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co2/change-state").bearerAuth(token).body(resumeState));
+            .metadata(ObjectMeta.builder().name("ns1-co2").build())
+            .spec(ChangeConnectorState.ChangeConnectorStateSpec.builder()
+                .action(ChangeConnectorState.ConnectorAction.resume).build())
+            .build();
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/connectors/ns1-co2/change-state").bearerAuth(token)
+                .body(resumeState));
 
         Thread.sleep(2000);
 
         // verify resumed directly on connect cluster
-        actual = connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-co2/status"), ConnectorStateInfo.class);
+        actual =
+            connectCli.toBlocking().retrieve(HttpRequest.GET("/connectors/ns1-co2/status"), ConnectorStateInfo.class);
         assertEquals("RUNNING", actual.connector().getState());
         assertEquals("RUNNING", actual.tasks().get(0).getState());
         assertEquals("RUNNING", actual.tasks().get(1).getState());
diff --git a/src/test/java/com/michelin/ns4kafka/integration/ExceptionHandlerTest.java b/src/test/java/com/michelin/ns4kafka/integration/ExceptionHandlerTest.java
index e17235c3..e7f119f2 100644
--- a/src/test/java/com/michelin/ns4kafka/integration/ExceptionHandlerTest.java
+++ b/src/test/java/com/michelin/ns4kafka/integration/ExceptionHandlerTest.java
@@ -1,13 +1,25 @@
 package com.michelin.ns4kafka.integration;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
 import com.michelin.ns4kafka.integration.TopicTest.BearerAccessRefreshToken;
-import com.michelin.ns4kafka.models.*;
+import com.michelin.ns4kafka.models.AccessControlEntry;
 import com.michelin.ns4kafka.models.AccessControlEntry.AccessControlEntrySpec;
 import com.michelin.ns4kafka.models.AccessControlEntry.Permission;
 import com.michelin.ns4kafka.models.AccessControlEntry.ResourcePatternType;
 import com.michelin.ns4kafka.models.AccessControlEntry.ResourceType;
+import com.michelin.ns4kafka.models.Namespace;
 import com.michelin.ns4kafka.models.Namespace.NamespaceSpec;
-import com.michelin.ns4kafka.models.RoleBinding.*;
+import com.michelin.ns4kafka.models.ObjectMeta;
+import com.michelin.ns4kafka.models.RoleBinding;
+import com.michelin.ns4kafka.models.RoleBinding.Role;
+import com.michelin.ns4kafka.models.RoleBinding.RoleBindingSpec;
+import com.michelin.ns4kafka.models.RoleBinding.Subject;
+import com.michelin.ns4kafka.models.RoleBinding.SubjectType;
+import com.michelin.ns4kafka.models.RoleBinding.Verb;
+import com.michelin.ns4kafka.models.Status;
+import com.michelin.ns4kafka.models.Topic;
 import com.michelin.ns4kafka.models.Topic.TopicSpec;
 import com.michelin.ns4kafka.validation.TopicValidator;
 import io.micronaut.context.annotation.Property;
@@ -21,15 +33,14 @@
 import io.micronaut.security.authentication.UsernamePasswordCredentials;
 import io.micronaut.test.extensions.junit5.annotation.MicronautTest;
 import jakarta.inject.Inject;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-
 import java.util.List;
 import java.util.Map;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
+/**
+ * Integration tests for ExceptionHandler.
+ */
 @MicronautTest
 @Property(name = "micronaut.security.gitlab.enabled", value = "false")
 class ExceptionHandlerTest extends AbstractIntegrationTest {
@@ -43,96 +54,102 @@ class ExceptionHandlerTest extends AbstractIntegrationTest {
     void init() {
         Namespace ns1 = Namespace.builder()
             .metadata(ObjectMeta.builder()
-                    .name("ns1")
-                    .cluster("test-cluster")
-                    .build())
+                .name("ns1")
+                .cluster("test-cluster")
+                .build())
             .spec(NamespaceSpec.builder()
-                    .kafkaUser("user1")
-                    .connectClusters(List.of("test-connect"))
-                    .topicValidator(TopicValidator.makeDefaultOneBroker())
-                    .build())
+                .kafkaUser("user1")
+                .connectClusters(List.of("test-connect"))
+                .topicValidator(TopicValidator.makeDefaultOneBroker())
+                .build())
             .build();
 
         RoleBinding rb1 = RoleBinding.builder()
             .metadata(ObjectMeta.builder()
-                    .name("ns1-rb")
-                    .namespace("ns1")
-                    .build())
+                .name("ns1-rb")
+                .namespace("ns1")
+                .build())
             .spec(RoleBindingSpec.builder()
-                    .role(Role.builder()
-                            .resourceTypes(List.of("topics", "acls"))
-                            .verbs(List.of(Verb.POST, Verb.GET))
-                            .build())
-                    .subject(Subject.builder()
-                            .subjectName("group1")
-                            .subjectType(SubjectType.GROUP)
-                            .build())
-                    .build())
+                .role(Role.builder()
+                    .resourceTypes(List.of("topics", "acls"))
+                    .verbs(List.of(Verb.POST, Verb.GET))
+                    .build())
+                .subject(Subject.builder()
+                    .subjectName("group1")
+                    .subjectType(SubjectType.GROUP)
+                    .build())
+                .build())
             .build();
 
+        UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin");
+        HttpResponse<BearerAccessRefreshToken> response =
+            client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class);
+
+        token = response.getBody().get().getAccessToken();
+
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1));
+        client.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1));
+
         AccessControlEntry ns1acl = AccessControlEntry.builder()
             .metadata(ObjectMeta.builder()
-                    .name("ns1-acl")
-                    .namespace("ns1")
-                    .build())
+                .name("ns1-acl")
+                .namespace("ns1")
+                .build())
             .spec(AccessControlEntrySpec.builder()
-                    .resourceType(ResourceType.TOPIC)
-                    .resource("ns1-")
-                    .resourcePatternType(ResourcePatternType.PREFIXED)
-                    .permission(Permission.OWNER)
-                    .grantedTo("ns1")
-                    .build())
+                .resourceType(ResourceType.TOPIC)
+                .resource("ns1-")
+                .resourcePatternType(ResourcePatternType.PREFIXED)
+                .permission(Permission.OWNER)
+                .grantedTo("ns1")
+                .build())
             .build();
 
-        UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin","admin");
-        HttpResponse<BearerAccessRefreshToken> response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class);
-
-        token = response.getBody().get().getAccessToken();
-
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1));
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1));
-        client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(ns1acl));
+        client.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(ns1acl));
     }
 
     @Test
     void invalidTopicName() {
         Topic topicFirstCreate = Topic.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-invalid-é")
-                        .namespace("ns1")
-                        .build())
-                .spec(TopicSpec.builder()
-                        .partitions(3)
-                        .replicationFactor(1)
-                        .configs(Map.of("cleanup.policy", "delete",
-                                "min.insync.replicas", "1",
-                                "retention.ms", "60000"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-invalid-é")
+                .namespace("ns1")
+                .build())
+            .spec(TopicSpec.builder()
+                .partitions(3)
+                .replicationFactor(1)
+                .configs(Map.of("cleanup.policy", "delete",
+                    "min.insync.replicas", "1",
+                    "retention.ms", "60000"))
+                .build())
+            .build();
 
         HttpClientResponseException exception = assertThrows(HttpClientResponseException.class,
-                () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics")
-                        .bearerAuth(token)
-                        .body(topicFirstCreate)));
+            () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics")
+                .bearerAuth(token)
+                .body(topicFirstCreate)));
 
         assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, exception.getStatus());
         assertEquals("Invalid Resource", exception.getMessage());
-        assertEquals("topic.metadata.name: must match \"^[a-zA-Z0-9_.-]+$\"", exception.getResponse().getBody(Status.class).get().getDetails().getCauses().get(0));
+        assertEquals("topic.metadata.name: must match \"^[a-zA-Z0-9_.-]+$\"",
+            exception.getResponse().getBody(Status.class).get().getDetails().getCauses().get(0));
     }
 
     @Test
     void forbiddenTopic() {
         HttpClientResponseException exception = assertThrows(HttpClientResponseException.class,
-                () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.GET,"/api/namespaces/ns2/topics")
-                        .bearerAuth(token)));
+            () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.GET, "/api/namespaces/ns2/topics")
+                .bearerAuth(token)));
 
         assertEquals(HttpStatus.FORBIDDEN, exception.getStatus());
         assertEquals("Resource forbidden", exception.getMessage());
     }
 
     @Test
-    void UnauthorizedTopic() {
+    void unauthorizedTopic() {
         HttpClientResponseException exception = assertThrows(HttpClientResponseException.class,
-                () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.GET,"/api/namespaces/ns1/topics")));
+            () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.GET, "/api/namespaces/ns1/topics")));
 
         assertEquals(HttpStatus.UNAUTHORIZED, exception.getStatus());
         assertEquals("Client '/': Unauthorized", exception.getMessage());
@@ -141,8 +158,9 @@ void UnauthorizedTopic() {
     @Test
     void notFoundTopic() {
         HttpClientResponseException exception = assertThrows(HttpClientResponseException.class,
-                () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.GET,"/api/namespaces/ns1/topics/not-found-topic")
-                        .bearerAuth(token)));
+            () -> client.toBlocking()
+                .exchange(HttpRequest.create(HttpMethod.GET, "/api/namespaces/ns1/topics/not-found-topic")
+                    .bearerAuth(token)));
 
         assertEquals(HttpStatus.NOT_FOUND, exception.getStatus());
         assertEquals("Not Found", exception.getMessage());
@@ -151,8 +169,8 @@ void notFoundTopic() {
     @Test
     void notValidMethodTopic() {
         HttpClientResponseException exception = assertThrows(HttpClientResponseException.class,
-                () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.PUT,"/api/namespaces/ns1/topics/")
-                        .bearerAuth(token)));
+            () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.PUT, "/api/namespaces/ns1/topics/")
+                .bearerAuth(token)));
 
         assertEquals(HttpStatus.FORBIDDEN, exception.getStatus());
         assertEquals("Resource forbidden", exception.getMessage());
diff --git a/src/test/java/com/michelin/ns4kafka/integration/LoginTest.java b/src/test/java/com/michelin/ns4kafka/integration/LoginTest.java
index f69b3c7f..1fc87f5c 100644
--- a/src/test/java/com/michelin/ns4kafka/integration/LoginTest.java
+++ b/src/test/java/com/michelin/ns4kafka/integration/LoginTest.java
@@ -1,5 +1,7 @@
 package com.michelin.ns4kafka.integration;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import io.micronaut.context.annotation.Property;
 import io.micronaut.http.HttpRequest;
 import io.micronaut.http.HttpResponse;
@@ -11,8 +13,6 @@
 import jakarta.inject.Inject;
 import org.junit.jupiter.api.Test;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
 @MicronautTest
 @Property(name = "micronaut.security.gitlab.enabled", value = "false")
 class LoginTest extends AbstractIntegrationTest {
@@ -22,8 +22,9 @@ class LoginTest extends AbstractIntegrationTest {
 
     @Test
     void login() {
-        UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin","admin");
-        HttpResponse<String> response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), String.class);
+        UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin");
+        HttpResponse<String> response =
+            client.toBlocking().exchange(HttpRequest.POST("/login", credentials), String.class);
         assertEquals(HttpStatus.OK, response.status());
     }
 }
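Both ExceptionHandlerTest and LoginTest assert HTTP-level outcomes: successes by status code, failures by catching the HttpClientResponseException that Micronaut's blocking client throws for any non-2xx response. A minimal sketch of that assertion pattern (the helper is illustrative, not from the patch):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import io.micronaut.http.HttpRequest;
import io.micronaut.http.HttpStatus;
import io.micronaut.http.client.BlockingHttpClient;
import io.micronaut.http.client.exceptions.HttpClientResponseException;

// Illustrative helper: assert that a GET fails with the expected status.
class ErrorAssertionSketch {
    static void assertGetFailsWith(BlockingHttpClient client, String uri, HttpStatus expected) {
        HttpClientResponseException e = assertThrows(HttpClientResponseException.class,
            () -> client.exchange(HttpRequest.GET(uri)));
        assertEquals(expected, e.getStatus());
    }
}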
diff --git a/src/test/java/com/michelin/ns4kafka/integration/SchemaTest.java b/src/test/java/com/michelin/ns4kafka/integration/SchemaTest.java
index 15d7bf1e..42270d28 100644
--- a/src/test/java/com/michelin/ns4kafka/integration/SchemaTest.java
+++ b/src/test/java/com/michelin/ns4kafka/integration/SchemaTest.java
@@ -1,5 +1,9 @@
 package com.michelin.ns4kafka.integration;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 import com.michelin.ns4kafka.integration.TopicTest.BearerAccessRefreshToken;
 import com.michelin.ns4kafka.models.AccessControlEntry;
 import com.michelin.ns4kafka.models.Namespace;
@@ -23,17 +27,12 @@
 import io.micronaut.security.authentication.UsernamePasswordCredentials;
 import io.micronaut.test.extensions.junit5.annotation.MicronautTest;
 import jakarta.inject.Inject;
+import java.util.List;
+import java.util.Map;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.jupiter.api.Assertions.*;
-
 @MicronautTest
 @Property(name = "micronaut.security.gitlab.enabled", value = "false")
 class SchemaTest extends AbstractIntegrationSchemaRegistryTest {
@@ -48,240 +47,237 @@ class SchemaTest extends AbstractIntegrationSchemaRegistryTest {
 
     private String token;
 
-    /**
-     * Init all integration tests
-     */
     @BeforeAll
     void init() {
         schemaRegistryClient = applicationContext.createBean(HttpClient.class, schemaRegistryContainer.getUrl());
 
         Namespace namespace = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1")
-                        .cluster("test-cluster")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1")
+                .cluster("test-cluster")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .build())
+            .build();
 
         RoleBinding roleBinding = RoleBinding.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-rb")
-                        .namespace("ns1")
-                        .build())
-                .spec(RoleBinding.RoleBindingSpec.builder()
-                        .role(RoleBinding.Role.builder()
-                                .resourceTypes(List.of("topics", "acls"))
-                                .verbs(List.of(RoleBinding.Verb.POST, RoleBinding.Verb.GET))
-                                .build())
-                        .subject(RoleBinding.Subject.builder()
-                                .subjectName("group1")
-                                .subjectType(RoleBinding.SubjectType.GROUP)
-                                .build())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-rb")
+                .namespace("ns1")
+                .build())
+            .spec(RoleBinding.RoleBindingSpec.builder()
+                .role(RoleBinding.Role.builder()
+                    .resourceTypes(List.of("topics", "acls"))
+                    .verbs(List.of(RoleBinding.Verb.POST, RoleBinding.Verb.GET))
+                    .build())
+                .subject(RoleBinding.Subject.builder()
+                    .subjectName("group1")
+                    .subjectType(RoleBinding.SubjectType.GROUP)
+                    .build())
+                .build())
+            .build();
 
-        AccessControlEntry aclSchema = AccessControlEntry.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-acl")
-                        .namespace("ns1")
-                        .build())
-                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
-                        .resource("ns1-")
-                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                        .permission(AccessControlEntry.Permission.OWNER)
-                        .grantedTo("ns1")
-                        .build())
-                .build();
 
         UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin");
-        HttpResponse<BearerAccessRefreshToken> response = ns4KafkaClient.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class);
+        HttpResponse<BearerAccessRefreshToken> response = ns4KafkaClient.toBlocking()
+            .exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class);
 
         token = response.getBody().get().getAccessToken();
 
-        ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(namespace));
-        ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/role-bindings").bearerAuth(token).body(roleBinding));
-        ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclSchema));
+        ns4KafkaClient.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(namespace));
+        ns4KafkaClient.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/role-bindings").bearerAuth(token)
+                .body(roleBinding));
+
+        AccessControlEntry aclSchema = AccessControlEntry.builder()
+            .metadata(ObjectMeta.builder()
+                .name("ns1-acl")
+                .namespace("ns1")
+                .build())
+            .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                .resourceType(AccessControlEntry.ResourceType.TOPIC)
+                .resource("ns1-")
+                .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                .permission(AccessControlEntry.Permission.OWNER)
+                .grantedTo("ns1")
+                .build())
+            .build();
+
+        ns4KafkaClient.toBlocking().exchange(
+            HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclSchema));
    }
 
-    /**
-     * Test the schema update with a compatible v2 schema
-     * - Register the schema v1
-     * - Update the compatibility to forward
-     * - Register the compatible schema v2
-     * - Assert success
-     */
     @Test
     void registerSchemaCompatibility() {
         // Register schema, first name is optional
         Schema schema = Schema.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-subject0-value")
-                        .build())
-                .spec(Schema.SchemaSpec.builder()
-                        .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}")
-                        .build())
-                .build();
-
-        var createResponse = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas")
-                .bearerAuth(token)
-                .body(schema), Schema.class);
+            .metadata(ObjectMeta.builder()
+                .name("ns1-subject0-value")
+                .build())
+            .spec(Schema.SchemaSpec.builder()
+                .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\","
+                    + "\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":"
+                    + "[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},"
+                    + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,"
+                    + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":"
+                    + "[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],"
+                    + "\"default\":null,\"doc\":\"Date of birth of the person\"}]}")
                .build())
+            .build();
+
+        var createResponse =
+            ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas")
+                .bearerAuth(token)
+                .body(schema), Schema.class);
 
         assertEquals("created", createResponse.header("X-Ns4kafka-Result"));
-
+
         SchemaResponse actual = schemaRegistryClient.toBlocking()
-                .retrieve(HttpRequest.GET("/subjects/ns1-subject0-value/versions/latest"), SchemaResponse.class);
+            .retrieve(HttpRequest.GET("/subjects/ns1-subject0-value/versions/latest"), SchemaResponse.class);
 
         Assertions.assertNotNull(actual.id());
         assertEquals(1, actual.version());
         assertEquals("ns1-subject0-value", actual.subject());
 
         // Set compat to forward
-        ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas/ns1-subject0-value/config")
-                .bearerAuth(token)
-                .body(Map.of("compatibility", Schema.Compatibility.FORWARD)), SchemaCompatibilityState.class);
+        ns4KafkaClient.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas/ns1-subject0-value/config")
+                .bearerAuth(token)
+                .body(Map.of("compatibility", Schema.Compatibility.FORWARD)), SchemaCompatibilityState.class);
 
         SchemaCompatibilityResponse updatedConfig = schemaRegistryClient.toBlocking()
-                .retrieve(HttpRequest.GET("/config/ns1-subject0-value"),
-                        SchemaCompatibilityResponse.class);
+            .retrieve(HttpRequest.GET("/config/ns1-subject0-value"),
+                SchemaCompatibilityResponse.class);
 
         assertEquals(Schema.Compatibility.FORWARD, updatedConfig.compatibilityLevel());
 
         // Register compatible schema v2, removing optional "first name" field
         Schema incompatibleSchema = Schema.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-subject0-value")
-                        .build())
-                .spec(Schema.SchemaSpec.builder()
-                        .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}")
-                        .build())
-                .build();
-
-        var createV2Response = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas")
-                .bearerAuth(token)
-                .body(incompatibleSchema), Schema.class);
+            .metadata(ObjectMeta.builder()
+                .name("ns1-subject0-value")
+                .build())
+            .spec(Schema.SchemaSpec.builder()
+                .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\","
+                    + "\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":"
+                    + "[{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\","
+                    + "\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":"
+                    + "\"Date of birth of the person\"}]}")
                .build())
+            .build();
+
+        var createV2Response =
+            ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas")
+                .bearerAuth(token)
+                .body(incompatibleSchema), Schema.class);
 
         assertEquals("changed", createV2Response.header("X-Ns4kafka-Result"));
 
         SchemaResponse actualV2 = schemaRegistryClient.toBlocking()
-                .retrieve(HttpRequest.GET("/subjects/ns1-subject0-value/versions/latest"),
-                        SchemaResponse.class);
+            .retrieve(HttpRequest.GET("/subjects/ns1-subject0-value/versions/latest"),
+                SchemaResponse.class);
 
         Assertions.assertNotNull(actualV2.id());
         assertEquals(2, actualV2.version());
         assertEquals("ns1-subject0-value", actualV2.subject());
     }
 
-    /**
-     * Test the schema update with an incompatible v2 schema
-     * - Register the schema v1
-     * - Update the compatibility to forward
-     * - Register the incompatible schema v2
-     * - Assert errors
-     */
     @Test
     void registerSchemaIncompatibility() {
         // Register schema, first name is non-optional
         Schema schema = Schema.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-subject1-value")
-                        .build())
-                .spec(Schema.SchemaSpec.builder()
-                        .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"string\"],\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}")
-                        .build())
-                .build();
-
-        var createResponse = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas")
-                .bearerAuth(token)
-                .body(schema), Schema.class);
+            .metadata(ObjectMeta.builder()
+                .name("ns1-subject1-value")
+                .build())
+            .spec(Schema.SchemaSpec.builder()
+                .schema(
+                    "{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\","
+                        + "\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"string\"],"
+                        + "\"doc\":\"First name of the person\"},"
+                        + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,"
+                        + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\","
+                        + "{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,"
+                        + "\"doc\":\"Date of birth of the person\"}]}")
                .build())
+            .build();
+
+        var createResponse =
+            ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas")
+                .bearerAuth(token)
+                .body(schema), Schema.class);
 
         assertEquals("created", createResponse.header("X-Ns4kafka-Result"));
 
         SchemaResponse actual = schemaRegistryClient.toBlocking()
-                .retrieve(HttpRequest.GET("/subjects/ns1-subject1-value/versions/latest"),
-                        SchemaResponse.class);
+            .retrieve(HttpRequest.GET("/subjects/ns1-subject1-value/versions/latest"),
+                SchemaResponse.class);
 
         Assertions.assertNotNull(actual.id());
         assertEquals(1, actual.version());
         assertEquals("ns1-subject1-value", actual.subject());
 
         // Set compat to forward
-        ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas/ns1-subject1-value/config")
-                .bearerAuth(token)
-                .body(Map.of("compatibility", Schema.Compatibility.FORWARD)), SchemaCompatibilityState.class);
+        ns4KafkaClient.toBlocking()
+            .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas/ns1-subject1-value/config")
+                .bearerAuth(token)
+                .body(Map.of("compatibility", Schema.Compatibility.FORWARD)), SchemaCompatibilityState.class);
 
         SchemaCompatibilityResponse updatedConfig = schemaRegistryClient.toBlocking()
-                .retrieve(HttpRequest.GET("/config/ns1-subject1-value"),
-                        SchemaCompatibilityResponse.class);
+            .retrieve(HttpRequest.GET("/config/ns1-subject1-value"),
+                SchemaCompatibilityResponse.class);
 
         assertEquals(Schema.Compatibility.FORWARD, updatedConfig.compatibilityLevel());
 
         // Register incompatible schema v2, removing non-optional "first name" field
         Schema incompatibleSchema = Schema.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("ns1-subject1-value")
-                        .build())
-                .spec(Schema.SchemaSpec.builder()
-                        .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("ns1-subject1-value")
+                .build())
+            .spec(Schema.SchemaSpec.builder()
+                .schema(
+                    "{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\","
+                        + "\"name\":\"PersonAvro\",\"fields\":["
+                        + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,"
+                        + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\","
+                        + "{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,"
+                        + "\"doc\":\"Date of birth of the person\"}]}")
                .build())
+            .build();
 
         HttpClientResponseException incompatibleActual = assertThrows(HttpClientResponseException.class,
-                () -> ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas")
-                        .bearerAuth(token)
-                        .body(incompatibleSchema)));
+            () -> ns4KafkaClient.toBlocking()
+                .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas")
+                    .bearerAuth(token)
+                    .body(incompatibleSchema)));
 
         assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, incompatibleActual.getStatus());
         assertEquals("Invalid Schema ns1-subject1-value", incompatibleActual.getMessage());
     }
"/api/namespaces/ns1/schemas") + .bearerAuth(token) + .body(incompatibleSchema))); assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, incompatibleActual.getStatus()); assertEquals("Invalid Schema ns1-subject1-value", incompatibleActual.getMessage()); } - /** - * Schema creation with references - */ @Test void registerSchemaWithReferences() { Schema schemaHeader = Schema.builder() - .metadata(ObjectMeta.builder() - .name("ns1-header-subject-value") - .build()) - .spec(Schema.SchemaSpec.builder() - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"HeaderAvro\",\"fields\":[{\"name\":\"id\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"ID of the header\"}]}") - .build()) - .build(); - - Schema schemaPersonWithoutRefs = Schema.builder() - .metadata(ObjectMeta.builder() - .name("ns1-person-subject-value") - .build()) - .spec(Schema.SchemaSpec.builder() - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"header\",\"type\":[\"null\",\"com.michelin.kafka.producer.showcase.avro.HeaderAvro\"],\"default\":null,\"doc\":\"Header of the persone\"},{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .build()) - .build(); - - Schema schemaPersonWithRefs = Schema.builder() - .metadata(ObjectMeta.builder() - .name("ns1-person-subject-value") - .build()) - .spec(Schema.SchemaSpec.builder() - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"header\",\"type\":[\"null\",\"com.michelin.kafka.producer.showcase.avro.HeaderAvro\"],\"default\":null,\"doc\":\"Header of the persone\"},{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .references(List.of(Schema.SchemaSpec.Reference.builder() - .name("com.michelin.kafka.producer.showcase.avro.HeaderAvro") - .subject("ns1-header-subject-value") - .version(1) - .build())) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1-header-subject-value") + .build()) + .spec(Schema.SchemaSpec.builder() + .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\"," + + "\"name\":\"HeaderAvro\",\"fields\":[{\"name\":\"id\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"ID of the header\"}]}") + .build()) + .build(); // Header created - var headerCreateResponse = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas") - .bearerAuth(token) - .body(schemaHeader), Schema.class); + var headerCreateResponse = + ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas") + .bearerAuth(token) + .body(schemaHeader), Schema.class); assertEquals("created", headerCreateResponse.header("X-Ns4kafka-Result")); - SchemaResponse actualHeader = 
schemaRegistryClient.toBlocking().retrieve(HttpRequest.GET("/subjects/ns1-header-subject-value/versions/latest"),
+        SchemaResponse actualHeader = schemaRegistryClient.toBlocking()
+            .retrieve(HttpRequest.GET("/subjects/ns1-header-subject-value/versions/latest"),
                 SchemaResponse.class);

         Assertions.assertNotNull(actualHeader.id());
@@ -289,22 +285,67 @@ void registerSchemaWithReferences() {
         assertEquals("ns1-header-subject-value", actualHeader.subject());

         // Person without refs not created
+        Schema schemaPersonWithoutRefs = Schema.builder()
+            .metadata(ObjectMeta.builder()
+                .name("ns1-person-subject-value")
+                .build())
+            .spec(Schema.SchemaSpec.builder()
+                .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\","
+                    + "\"name\":\"PersonAvro\","
+                    + "\"fields\":[{\"name\":\"header\",\"type\":[\"null\","
+                    + "\"com.michelin.kafka.producer.showcase.avro.HeaderAvro\"],"
+                    + "\"default\":null,\"doc\":\"Header of the person\"},{\"name\":\"firstName\","
+                    + "\"type\":[\"null\",\"string\"],"
+                    + "\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\","
+                    + "\"type\":[\"null\",\"string\"],"
+                    + "\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\","
+                    + "\"type\":[\"null\","
+                    + "{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,"
+                    + "\"doc\":\"Date of birth of the person\"}]}")
+                .build())
+            .build();
+
         HttpClientResponseException createException = assertThrows(HttpClientResponseException.class,
-            () -> ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas")
-                .bearerAuth(token)
-                .body(schemaPersonWithoutRefs)));
+            () -> ns4KafkaClient.toBlocking()
+                .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas")
+                    .bearerAuth(token)
+                    .body(schemaPersonWithoutRefs)));

         assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, createException.getStatus());
         assertEquals("Invalid Schema ns1-person-subject-value", createException.getMessage());

         // Person with refs created
-        var personCreateResponse = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas")
-            .bearerAuth(token)
-            .body(schemaPersonWithRefs), Schema.class);
+        Schema schemaPersonWithRefs = Schema.builder()
+            .metadata(ObjectMeta.builder()
+                .name("ns1-person-subject-value")
+                .build())
+            .spec(Schema.SchemaSpec.builder()
+                .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\","
+                    + "\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"header\",\"type\":[\"null\","
+                    + "\"com.michelin.kafka.producer.showcase.avro.HeaderAvro\"],"
+                    + "\"default\":null,\"doc\":\"Header of the person\"},{\"name\":\"firstName\","
+                    + "\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},"
+                    + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":"
+                    + "\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\","
+                    + "\"logicalType\":\"timestamp-millis\"}],\"default\":null,"
+                    + "\"doc\":\"Date of birth of the person\"}]}")
+                .references(List.of(Schema.SchemaSpec.Reference.builder()
+                    .name("com.michelin.kafka.producer.showcase.avro.HeaderAvro")
+                    .subject("ns1-header-subject-value")
+                    .version(1)
+                    .build()))
+                .build())
+            .build();
+
+        var personCreateResponse =
+            ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas")
+                .bearerAuth(token)
+                .body(schemaPersonWithRefs), Schema.class);
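+
+        // Registration succeeds this time: the reference block maps the HeaderAvro type name
+        // to the ns1-header-subject-value subject registered earlier in this test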
assertEquals("created", personCreateResponse.header("X-Ns4kafka-Result")); - SchemaResponse actualPerson = schemaRegistryClient.toBlocking().retrieve(HttpRequest.GET("/subjects/ns1-person-subject-value/versions/latest"), + SchemaResponse actualPerson = schemaRegistryClient.toBlocking() + .retrieve(HttpRequest.GET("/subjects/ns1-person-subject-value/versions/latest"), SchemaResponse.class); Assertions.assertNotNull(actualPerson.id()); @@ -312,94 +353,108 @@ void registerSchemaWithReferences() { assertEquals("ns1-person-subject-value", actualPerson.subject()); } - /** - * Schema creation with prefix that does not respect ACLs - */ @Test void registerSchemaWrongPrefix() { Schema wrongSchema = Schema.builder() - .metadata(ObjectMeta.builder() - .name("wrongprefix-subject") - .build()) - .spec(Schema.SchemaSpec.builder() - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the personnn\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("wrongprefix-subject") + .build()) + .spec(Schema.SchemaSpec.builder() + .schema( + "{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\"," + + "\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"First name of the person\"}," + + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null," + + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\"," + + "{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null," + + "\"doc\":\"Date of birth of the person\"}]}") + .build()) + .build(); HttpClientResponseException createException = assertThrows(HttpClientResponseException.class, - () -> ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas") - .bearerAuth(token) - .body(wrongSchema))); + () -> ns4KafkaClient.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas") + .bearerAuth(token) + .body(wrongSchema))); assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, createException.getStatus()); assertEquals("Invalid Schema wrongprefix-subject", createException.getMessage()); // Get all schemas - var getResponse = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.GET,"/api/namespaces/ns1/schemas") + var getResponse = + ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.GET, "/api/namespaces/ns1/schemas") .bearerAuth(token), Argument.listOf(SchemaList.class)); assertTrue(getResponse.getBody().isPresent()); assertTrue(getResponse.getBody().get() - .stream() - .noneMatch(schemaList -> schemaList.getMetadata().getName().equals("wrongprefix-subject"))); + .stream() + .noneMatch(schemaList -> schemaList.getMetadata().getName().equals("wrongprefix-subject"))); HttpClientResponseException getException = assertThrows(HttpClientResponseException.class, - () -> schemaRegistryClient.toBlocking() - .retrieve(HttpRequest.GET("/subjects/wrongprefix-subject/versions/latest"), - SchemaResponse.class)); + () -> schemaRegistryClient.toBlocking() + 
.retrieve(HttpRequest.GET("/subjects/wrongprefix-subject/versions/latest"), + SchemaResponse.class)); assertEquals(HttpStatus.NOT_FOUND, getException.getStatus()); } - /** - * Schema creation and deletion - */ @Test void registerSchema() { Schema schema = Schema.builder() - .metadata(ObjectMeta.builder() - .name("ns1-subject2-value") - .build()) - .spec(Schema.SchemaSpec.builder() - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the personnn\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1-subject2-value") + .build()) + .spec(Schema.SchemaSpec.builder() + .schema( + "{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\"," + + "\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"First name of the person\"}," + + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null," + + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\"," + + "{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null," + + "\"doc\":\"Date of birth of the person\"}]}") + .build()) + .build(); // Apply schema - var createResponse = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/schemas") - .bearerAuth(token) - .body(schema), Schema.class); + var createResponse = + ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/schemas") + .bearerAuth(token) + .body(schema), Schema.class); assertEquals("created", createResponse.header("X-Ns4kafka-Result")); // Get all schemas - var getResponse = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.GET,"/api/namespaces/ns1/schemas") + var getResponse = + ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.GET, "/api/namespaces/ns1/schemas") .bearerAuth(token), Argument.listOf(SchemaList.class)); assertTrue(getResponse.getBody().isPresent()); assertTrue(getResponse.getBody().get() - .stream() - .anyMatch(schemaList -> schemaList.getMetadata().getName().equals("ns1-subject2-value"))); + .stream() + .anyMatch(schemaList -> schemaList.getMetadata().getName().equals("ns1-subject2-value"))); // Delete schema - var deleteResponse = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.DELETE,"/api/namespaces/ns1/schemas/ns1-subject2-value") - .bearerAuth(token), Schema.class); + var deleteResponse = ns4KafkaClient.toBlocking() + .exchange(HttpRequest.create(HttpMethod.DELETE, "/api/namespaces/ns1/schemas/ns1-subject2-value") + .bearerAuth(token), Schema.class); assertEquals(HttpStatus.NO_CONTENT, deleteResponse.getStatus()); // Get all schemas - var getResponseEmpty = ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.GET,"/api/namespaces/ns1/schemas") + var getResponseEmpty = + ns4KafkaClient.toBlocking().exchange(HttpRequest.create(HttpMethod.GET, "/api/namespaces/ns1/schemas") .bearerAuth(token), Argument.listOf(SchemaList.class)); assertTrue(getResponseEmpty.getBody().isPresent()); 
assertTrue(getResponseEmpty.getBody().get() - .stream() - .noneMatch(schemaList -> schemaList.getMetadata().getName().equals("ns1-subject2-value"))); + .stream() + .noneMatch(schemaList -> schemaList.getMetadata().getName().equals("ns1-subject2-value"))); HttpClientResponseException getException = assertThrows(HttpClientResponseException.class, - () -> schemaRegistryClient.toBlocking() - .retrieve(HttpRequest.GET("/subjects/ns1-subject2-value/versions/latest"), - SchemaResponse.class)); + () -> schemaRegistryClient.toBlocking() + .retrieve(HttpRequest.GET("/subjects/ns1-subject2-value/versions/latest"), + SchemaResponse.class)); assertEquals(HttpStatus.NOT_FOUND, getException.getStatus()); } diff --git a/src/test/java/com/michelin/ns4kafka/integration/StreamTest.java b/src/test/java/com/michelin/ns4kafka/integration/StreamTest.java index a2baaeab..ba89ce4a 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/StreamTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/StreamTest.java @@ -1,5 +1,8 @@ package com.michelin.ns4kafka.integration; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + import com.michelin.ns4kafka.integration.TopicTest.BearerAccessRefreshToken; import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.AccessControlEntry.AccessControlEntrySpec; @@ -21,6 +24,8 @@ import io.micronaut.security.authentication.UsernamePasswordCredentials; import io.micronaut.test.extensions.junit5.annotation.MicronautTest; import jakarta.inject.Inject; +import java.util.List; +import java.util.concurrent.ExecutionException; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.common.acl.AccessControlEntryFilter; import org.apache.kafka.common.acl.AclBindingFilter; @@ -30,12 +35,6 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - @MicronautTest @Property(name = "micronaut.security.gitlab.enabled", value = "false") class StreamTest extends AbstractIntegrationTest { @@ -52,84 +51,89 @@ class StreamTest extends AbstractIntegrationTest { void init() { Namespace ns1 = Namespace.builder() .metadata(ObjectMeta.builder() - .name("nskafkastream") - .cluster("test-cluster") - .build()) + .name("nskafkastream") + .cluster("test-cluster") + .build()) .spec(NamespaceSpec.builder() - .kafkaUser("user1") - .connectClusters(List.of("test-connect")) - .topicValidator(TopicValidator.makeDefaultOneBroker()) - .build()) + .kafkaUser("user1") + .connectClusters(List.of("test-connect")) + .topicValidator(TopicValidator.makeDefaultOneBroker()) + .build()) .build(); AccessControlEntry acl1 = AccessControlEntry.builder() .metadata(ObjectMeta.builder() - .name("nskafkastream-acl-topic") - .build()) + .name("nskafkastream-acl-topic") + .build()) .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("kstream-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("nskafkastream") - .build()) + .resourceType(ResourceType.TOPIC) + .resource("kstream-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("nskafkastream") + .build()) .build(); - AccessControlEntry acl2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - 
.name("nskafkastream-acl-group") - .build()) - .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.GROUP) - .resource("kstream-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("nskafkastream") - .build()) - .build(); - - - UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin","admin"); - HttpResponse response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class); + UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); + HttpResponse response = + client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class); token = response.getBody().get().getAccessToken(); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/nskafkastream/acls").bearerAuth(token).body(acl1)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/nskafkastream/acls").bearerAuth(token).body(acl2)); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/nskafkastream/acls").bearerAuth(token).body(acl1)); + + AccessControlEntry acl2 = AccessControlEntry.builder() + .metadata(ObjectMeta.builder() + .name("nskafkastream-acl-group") + .build()) + .spec(AccessControlEntrySpec.builder() + .resourceType(ResourceType.GROUP) + .resource("kstream-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("nskafkastream") + .build()) + .build(); + + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/nskafkastream/acls").bearerAuth(token).body(acl2)); } @Test void verifyCreationOfAcl() throws InterruptedException, ExecutionException { KafkaStream stream = KafkaStream.builder() - .metadata(ObjectMeta.builder() - .name("kstream-test") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("kstream-test") + .build()) + .build(); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/nskafkastream/streams") - .bearerAuth(token) - .body(stream)); + client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/nskafkastream/streams") + .bearerAuth(token) + .body(stream)); //force ACL Sync aceAsyncExecutorList.forEach(AccessControlEntryAsyncExecutor::run); Admin kafkaClient = getAdminClient(); var aclTopic = kafkaClient.describeAcls(new AclBindingFilter( - new ResourcePatternFilter(org.apache.kafka.common.resource.ResourceType.TOPIC, - stream.getMetadata().getName(), - PatternType.PREFIXED), - AccessControlEntryFilter.ANY)).values().get(); + new ResourcePatternFilter(org.apache.kafka.common.resource.ResourceType.TOPIC, + stream.getMetadata().getName(), + PatternType.PREFIXED), + AccessControlEntryFilter.ANY)).values().get(); var aclTransactionalId = kafkaClient.describeAcls(new AclBindingFilter( - new ResourcePatternFilter(org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID, - stream.getMetadata().getName(), - PatternType.PREFIXED), - AccessControlEntryFilter.ANY)).values().get(); + new ResourcePatternFilter(org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID, + stream.getMetadata().getName(), + PatternType.PREFIXED), + 
AccessControlEntryFilter.ANY)).values().get(); assertEquals(2, aclTopic.size()); assertTrue(aclTopic.stream() - .allMatch(aclBinding -> List.of(AclOperation.CREATE, AclOperation.DELETE).contains(aclBinding.entry().operation()))); + .allMatch(aclBinding -> List.of(AclOperation.CREATE, AclOperation.DELETE) + .contains(aclBinding.entry().operation()))); assertEquals(1, aclTransactionalId.size()); assertEquals(AclOperation.WRITE, aclTransactionalId.stream().findFirst().get().entry().operation()); diff --git a/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java b/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java index 48fac0c3..9f09a446 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/TopicTest.java @@ -1,14 +1,29 @@ package com.michelin.ns4kafka.integration; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertLinesMatch; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + import com.fasterxml.jackson.annotation.JsonProperty; import com.michelin.ns4kafka.controllers.AkhqClaimProviderController; -import com.michelin.ns4kafka.models.*; +import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.AccessControlEntry.AccessControlEntrySpec; import com.michelin.ns4kafka.models.AccessControlEntry.Permission; import com.michelin.ns4kafka.models.AccessControlEntry.ResourcePatternType; import com.michelin.ns4kafka.models.AccessControlEntry.ResourceType; +import com.michelin.ns4kafka.models.DeleteRecordsResponse; +import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Namespace.NamespaceSpec; -import com.michelin.ns4kafka.models.RoleBinding.*; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.models.RoleBinding; +import com.michelin.ns4kafka.models.RoleBinding.Role; +import com.michelin.ns4kafka.models.RoleBinding.RoleBindingSpec; +import com.michelin.ns4kafka.models.RoleBinding.Subject; +import com.michelin.ns4kafka.models.RoleBinding.SubjectType; +import com.michelin.ns4kafka.models.RoleBinding.Verb; +import com.michelin.ns4kafka.models.Status; +import com.michelin.ns4kafka.models.Topic; import com.michelin.ns4kafka.models.Topic.TopicSpec; import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; import com.michelin.ns4kafka.validation.TopicValidator; @@ -24,6 +39,11 @@ import io.micronaut.security.authentication.UsernamePasswordCredentials; import io.micronaut.test.extensions.junit5.annotation.MicronautTest; import jakarta.inject.Inject; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; @@ -34,14 +54,6 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ExecutionException; - -import static org.junit.jupiter.api.Assertions.*; - @MicronautTest @Property(name = "micronaut.security.gitlab.enabled", value = "false") class TopicTest extends AbstractIntegrationTest { @@ -54,240 +66,242 @@ class TopicTest extends AbstractIntegrationTest { private String token; - /** - * Init all integration tests - */ @BeforeAll - void init(){ + void 
init() { Namespace ns1 = Namespace.builder() .metadata(ObjectMeta.builder() - .name("ns1") - .cluster("test-cluster") - .labels(Map.of("support-group", "LDAP-GROUP-1")) - .build()) + .name("ns1") + .cluster("test-cluster") + .labels(Map.of("support-group", "LDAP-GROUP-1")) + .build()) .spec(NamespaceSpec.builder() - .kafkaUser("user1") - .connectClusters(List.of("test-connect")) - .topicValidator(TopicValidator.makeDefaultOneBroker()) - .build()) + .kafkaUser("user1") + .connectClusters(List.of("test-connect")) + .topicValidator(TopicValidator.makeDefaultOneBroker()) + .build()) .build(); RoleBinding rb1 = RoleBinding.builder() .metadata(ObjectMeta.builder() - .name("ns1-rb") - .namespace("ns1") - .build()) + .name("ns1-rb") + .namespace("ns1") + .build()) .spec(RoleBindingSpec.builder() - .role(Role.builder() - .resourceTypes(List.of("topics", "acls")) - .verbs(List.of(Verb.POST, Verb.GET)) - .build()) - .subject(Subject.builder() - .subjectName("group1") - .subjectType(SubjectType.GROUP) - .build()) - .build()) + .role(Role.builder() + .resourceTypes(List.of("topics", "acls")) + .verbs(List.of(Verb.POST, Verb.GET)) + .build()) + .subject(Subject.builder() + .subjectName("group1") + .subjectType(SubjectType.GROUP) + .build()) + .build()) .build(); + UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); + HttpResponse response = + client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class); + + token = response.getBody().get().getAccessToken(); + + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1)); + AccessControlEntry ns1acl = AccessControlEntry.builder() .metadata(ObjectMeta.builder() - .name("ns1-acl") - .namespace("ns1") - .build()) + .name("ns1-acl") + .namespace("ns1") + .build()) .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("ns1") - .build()) + .resourceType(ResourceType.TOPIC) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("ns1") + .build()) .build(); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(ns1acl)); + Namespace ns2 = Namespace.builder() .metadata(ObjectMeta.builder() - .name("ns2") - .cluster("test-cluster") - .build()) + .name("ns2") + .cluster("test-cluster") + .build()) .spec(NamespaceSpec.builder() - .kafkaUser("user2") - .connectClusters(List.of("test-connect")) - .topicValidator(TopicValidator.makeDefaultOneBroker()) - .build()) + .kafkaUser("user2") + .connectClusters(List.of("test-connect")) + .topicValidator(TopicValidator.makeDefaultOneBroker()) + .build()) .build(); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns2)); + RoleBinding rb2 = RoleBinding.builder() .metadata(ObjectMeta.builder() - .name("ns2-rb") - .namespace("ns2") - .build()) + .name("ns2-rb") + .namespace("ns2") + .build()) .spec(RoleBindingSpec.builder() - .role(Role.builder() - .resourceTypes(List.of("topics", "acls")) - .verbs(List.of(Verb.POST, Verb.GET)) - .build()) - .subject(Subject.builder() - .subjectName("group2") - .subjectType(SubjectType.GROUP) - .build()) 
- .build()) + .role(Role.builder() + .resourceTypes(List.of("topics", "acls")) + .verbs(List.of(Verb.POST, Verb.GET)) + .build()) + .subject(Subject.builder() + .subjectName("group2") + .subjectType(SubjectType.GROUP) + .build()) + .build()) .build(); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns2/role-bindings").bearerAuth(token).body(rb2)); + AccessControlEntry ns2acl = AccessControlEntry.builder() .metadata(ObjectMeta.builder() - .name("ns2-acl") - .namespace("ns2") - .build()) + .name("ns2-acl") + .namespace("ns2") + .build()) .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("ns2-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.OWNER) - .grantedTo("ns2") - .build()) + .resourceType(ResourceType.TOPIC) + .resource("ns2-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.OWNER) + .grantedTo("ns2") + .build()) .build(); - UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin","admin"); - HttpResponse response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), BearerAccessRefreshToken.class); - - token = response.getBody().get().getAccessToken(); - - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/role-bindings").bearerAuth(token).body(rb1)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(ns1acl)); - - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces").bearerAuth(token).body(ns2)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns2/role-bindings").bearerAuth(token).body(rb2)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns2/acls").bearerAuth(token).body(ns2acl)); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns2/acls").bearerAuth(token).body(ns2acl)); } - /** - * Validate AKHQ claims - */ @Test - void akhqClaim(){ - AkhqClaimProviderController.AKHQClaimRequest akhqClaimRequest = AkhqClaimProviderController.AKHQClaimRequest.builder() + void akhqClaim() { + AkhqClaimProviderController.AkhqClaimRequest akhqClaimRequest = + AkhqClaimProviderController.AkhqClaimRequest.builder() .username("test") .groups(List.of("LDAP-GROUP-1")) .providerName("LDAP") .build(); - AkhqClaimProviderController.AKHQClaimResponse response = client.toBlocking().retrieve( - HttpRequest.POST("/akhq-claim", akhqClaimRequest), - AkhqClaimProviderController.AKHQClaimResponse.class); + AkhqClaimProviderController.AkhqClaimResponse response = client.toBlocking().retrieve( + HttpRequest.POST("/akhq-claim", akhqClaimRequest), + AkhqClaimProviderController.AkhqClaimResponse.class); assertLinesMatch( - List.of( - "topic/read", - "topic/data/read", - "group/read", - "registry/read", - "connect/read", - "connect/state/update" - ), - response.getRoles()); + List.of( + "topic/read", + "topic/data/read", + "group/read", + "registry/read", + "connect/read", + "connect/state/update" + ), + response.getRoles()); assertEquals(1, response.getAttributes().get("topicsFilterRegexp").size()); assertLinesMatch(List.of("^\\Qns1-\\E.*$"), response.getAttributes().get("topicsFilterRegexp")); } - /** - * Validate topic creation - * @throws ExecutionException Any execution exception - * @throws 
InterruptedException Any interrupted exception - */ @Test void createTopic() throws InterruptedException, ExecutionException { Topic topicFirstCreate = Topic.builder() .metadata(ObjectMeta.builder() - .name("ns1-topicFirstCreate") - .namespace("ns1") - .build()) + .name("ns1-topicFirstCreate") + .namespace("ns1") + .build()) .spec(TopicSpec.builder() - .partitions(3) - .replicationFactor(1) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "1", - "retention.ms", "60000")) - .build()) + .partitions(3) + .replicationFactor(1) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "1", + "retention.ms", "60000")) + .build()) .build(); - var response = client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics").bearerAuth(token).body(topicFirstCreate)); + var response = client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(topicFirstCreate)); assertEquals("created", response.header("X-Ns4kafka-Result")); topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); Admin kafkaClient = getAdminClient(); - System.out.println(kafkaClient.describeTopics(List.of("ns1-topicFirstCreate")).all().get()); - List topicPartitionInfos = kafkaClient.describeTopics(List.of("ns1-topicFirstCreate")).all().get() - .get("ns1-topicFirstCreate").partitions(); + System.out.println(kafkaClient.describeTopics(List.of("ns1-topicFirstCreate")).allTopicNames().get()); + List topicPartitionInfos = + kafkaClient.describeTopics(List.of("ns1-topicFirstCreate")).allTopicNames().get() + .get("ns1-topicFirstCreate").partitions(); assertEquals(topicFirstCreate.getSpec().getPartitions(), topicPartitionInfos.size()); Map config = topicFirstCreate.getSpec().getConfigs(); Set configKey = config.keySet(); - ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC,"ns1-topicFirstCreate"); - List valueToVerify = kafkaClient.describeConfigs(List.of(configResource)).all().get().get(configResource).entries().stream() - .filter(e -> configKey.contains(e.name())) + ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, "ns1-topicFirstCreate"); + List valueToVerify = + kafkaClient.describeConfigs(List.of(configResource)).all().get().get(configResource).entries().stream() + .filter(e -> configKey.contains(e.name())) .toList(); assertEquals(config.size(), valueToVerify.size()); valueToVerify.forEach(entry -> assertEquals(config.get(entry.name()), entry.value())); } - /** - * Validate topic update - * @throws ExecutionException Any execution exception - * @throws InterruptedException Any interrupted exception - */ @Test void updateTopic() throws InterruptedException, ExecutionException { Topic topic2Create = Topic.builder() .metadata(ObjectMeta.builder() - .name("ns1-topic2Create") - .namespace("ns1") - .build()) + .name("ns1-topic2Create") + .namespace("ns1") + .build()) .spec(TopicSpec.builder() - .partitions(3) - .replicationFactor(1) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "1", - "retention.ms", "60000")) - .build()) + .partitions(3) + .replicationFactor(1) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "1", + "retention.ms", "60000")) + .build()) .build(); - var response = client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics").bearerAuth(token).body(topic2Create)); + var response = client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, 
"/api/namespaces/ns1/topics").bearerAuth(token).body(topic2Create)); assertEquals("created", response.header("X-Ns4kafka-Result")); topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); - response = client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics").bearerAuth(token).body(topic2Create)); + response = client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(topic2Create)); assertEquals("unchanged", response.header("X-Ns4kafka-Result")); Topic topic2Update = Topic.builder() .metadata(ObjectMeta.builder() - .name("ns1-topic2Create") - .namespace("ns1") - .build()) + .name("ns1-topic2Create") + .namespace("ns1") + .build()) .spec(TopicSpec.builder() - .partitions(3) - .replicationFactor(1) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "1", - "retention.ms", "70000"))//This line was changed - .build()) + .partitions(3) + .replicationFactor(1) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "1", + "retention.ms", "70000"))//This line was changed + .build()) .build(); - response = client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics").bearerAuth(token).body(topic2Update)); + response = client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(topic2Update)); assertEquals("changed", response.header("X-Ns4kafka-Result")); //force Topic Sync topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); Admin kafkaClient = getAdminClient(); - System.out.println(kafkaClient.describeTopics(List.of("ns1-topic2Create")).all().get()); - List topicPartitionInfos = kafkaClient.describeTopics(List.of("ns1-topic2Create")).all().get() - .get("ns1-topic2Create").partitions(); + System.out.println(kafkaClient.describeTopics(List.of("ns1-topic2Create")).allTopicNames().get()); + List topicPartitionInfos = + kafkaClient.describeTopics(List.of("ns1-topic2Create")).allTopicNames().get() + .get("ns1-topic2Create").partitions(); // verify partition of the updated topic assertEquals(topic2Update.getSpec().getPartitions(), topicPartitionInfos.size()); @@ -295,143 +309,133 @@ void updateTopic() throws InterruptedException, ExecutionException { Map config = topic2Update.getSpec().getConfigs(); Set configKey = config.keySet(); - ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC,"ns1-topic2Create"); - List valueToVerify = kafkaClient.describeConfigs(List.of(configResource)).all().get().get(configResource).entries().stream() - .filter(e -> configKey.contains(e.name())) + ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, "ns1-topic2Create"); + List valueToVerify = + kafkaClient.describeConfigs(List.of(configResource)).all().get().get(configResource).entries().stream() + .filter(e -> configKey.contains(e.name())) .toList(); assertEquals(config.size(), valueToVerify.size()); - valueToVerify.forEach(entry -> { - assertEquals(config.get(entry.name()), entry.value()); - }); + valueToVerify.forEach(entry -> assertEquals(config.get(entry.name()), entry.value())); } - /** - * Validate topic creation when topic name is invalid - */ @Test void invalidTopicName() { Topic topicFirstCreate = Topic.builder() - .metadata(ObjectMeta.builder() - .name("ns1-invalid-é") - .namespace("ns1") - .build()) - .spec(TopicSpec.builder() - .partitions(3) - .replicationFactor(1) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", 
"1", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1-invalid-é") + .namespace("ns1") + .build()) + .spec(TopicSpec.builder() + .partitions(3) + .replicationFactor(1) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "1", + "retention.ms", "60000")) + .build()) + .build(); HttpClientResponseException exception = assertThrows(HttpClientResponseException.class, - () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics") - .bearerAuth(token) - .body(topicFirstCreate))); + () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics") + .bearerAuth(token) + .body(topicFirstCreate))); assertEquals("Invalid Resource", exception.getMessage()); - assertEquals("topic.metadata.name: must match \"^[a-zA-Z0-9_.-]+$\"", exception.getResponse().getBody(Status.class).get().getDetails().getCauses().get(0)); + assertEquals("topic.metadata.name: must match \"^[a-zA-Z0-9_.-]+$\"", + exception.getResponse().getBody(Status.class).get().getDetails().getCauses().get(0)); } - /** - * Validate topic creation when there is no change on it - */ @Test void updateTopicNoChange() { AccessControlEntry aclns1Tons2 = AccessControlEntry.builder() .metadata(ObjectMeta.builder() - .name("ns1-acltons2") - .namespace("ns1") - .build()) + .name("ns1-acltons2") + .namespace("ns1") + .build()) .spec(AccessControlEntrySpec.builder() - .resourceType(ResourceType.TOPIC) - .resource("ns1-") - .resourcePatternType(ResourcePatternType.PREFIXED) - .permission(Permission.READ) - .grantedTo("ns2") - .build()) + .resourceType(ResourceType.TOPIC) + .resource("ns1-") + .resourcePatternType(ResourcePatternType.PREFIXED) + .permission(Permission.READ) + .grantedTo("ns2") + .build()) .build(); Topic topicToModify = Topic.builder() .metadata(ObjectMeta.builder() - .name("ns1-topicToModify") - .namespace("ns1") - .build()) + .name("ns1-topicToModify") + .namespace("ns1") + .build()) .spec(TopicSpec.builder() - .partitions(3) - .replicationFactor(1) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "1", - "retention.ms", "60000")) - .build()) + .partitions(3) + .replicationFactor(1) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "1", + "retention.ms", "60000")) + .build()) .build(); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/acls").bearerAuth(token).body(aclns1Tons2)); + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/acls").bearerAuth(token).body(aclns1Tons2)); - assertEquals(HttpStatus.OK, client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics").bearerAuth(token).body(topicToModify)).getStatus()); + assertEquals(HttpStatus.OK, client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(topicToModify)) + .getStatus()); Topic topicToModifyBis = Topic.builder() .metadata(topicToModify.getMetadata()) .spec(TopicSpec.builder() .partitions(3) .replicationFactor(1) .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "1", - "retention.ms", "90000")) + "min.insync.replicas", "1", + "retention.ms", "90000")) .build()) .build(); - HttpClientResponseException exception = assertThrows(HttpClientResponseException.class,() -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns2/topics").bearerAuth(token).body(topicToModifyBis))); + 
HttpClientResponseException exception = assertThrows(HttpClientResponseException.class, + () -> client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns2/topics").bearerAuth(token) + .body(topicToModifyBis))); assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, exception.getStatus()); // Compare spec of the topics and assure there is no change - assertEquals(topicToModify.getSpec(), client.toBlocking().retrieve(HttpRequest.create(HttpMethod.GET,"/api/namespaces/ns1/topics/ns1-topicToModify").bearerAuth(token), Topic.class ).getSpec()); + assertEquals(topicToModify.getSpec(), client.toBlocking().retrieve( + HttpRequest.create(HttpMethod.GET, "/api/namespaces/ns1/topics/ns1-topicToModify").bearerAuth(token), + Topic.class).getSpec()); } - /** - * Validate records deletion on topic - */ @Test void testDeleteRecords() { Topic topicToDelete = Topic.builder() - .metadata(ObjectMeta.builder() - .name("ns1-topicToDelete") - .namespace("ns1") - .build()) - .spec(TopicSpec.builder() - .partitions(3) - .replicationFactor(1) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "1", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1-topicToDelete") + .namespace("ns1") + .build()) + .spec(TopicSpec.builder() + .partitions(3) + .replicationFactor(1) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "1", + "retention.ms", "60000")) + .build()) + .build(); - var response = client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics").bearerAuth(token).body(topicToDelete)); + var response = client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(topicToDelete)); assertEquals("created", response.header("X-Ns4kafka-Result")); topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); - List deleteRecordsResponse = client.toBlocking().retrieve(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics/ns1-topicToDelete/delete-records").bearerAuth(token), Argument.listOf(DeleteRecordsResponse.class)); + List deleteRecordsResponse = client.toBlocking().retrieve( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics/ns1-topicToDelete/delete-records") + .bearerAuth(token), Argument.listOf(DeleteRecordsResponse.class)); DeleteRecordsResponse resultPartition0 = deleteRecordsResponse - .stream() - .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 0) - .findFirst() - .orElse(null); - - DeleteRecordsResponse resultPartition1 = deleteRecordsResponse - .stream() - .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 1) - .findFirst() - .orElse(null); - - DeleteRecordsResponse resultPartition2 = deleteRecordsResponse - .stream() - .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 2) - .findFirst() - .orElse(null); + .stream() + .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 0) + .findFirst() + .orElse(null); assertEquals(3L, deleteRecordsResponse.size()); @@ -440,80 +444,69 @@ void testDeleteRecords() { assertEquals(0, resultPartition0.getSpec().getPartition()); assertEquals("ns1-topicToDelete", resultPartition0.getSpec().getTopic()); + DeleteRecordsResponse resultPartition1 = deleteRecordsResponse + .stream() + .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 1) + .findFirst() + .orElse(null); + assertNotNull(resultPartition1); assertEquals(0, resultPartition1.getSpec().getOffset()); assertEquals(1, 
resultPartition1.getSpec().getPartition()); assertEquals("ns1-topicToDelete", resultPartition1.getSpec().getTopic()); + DeleteRecordsResponse resultPartition2 = deleteRecordsResponse + .stream() + .filter(deleteRecord -> deleteRecord.getSpec().getPartition() == 2) + .findFirst() + .orElse(null); + assertNotNull(resultPartition2); assertEquals(0, resultPartition2.getSpec().getOffset()); assertEquals(2, resultPartition2.getSpec().getPartition()); assertEquals("ns1-topicToDelete", resultPartition2.getSpec().getTopic()); - } + } - /** - * Validate records deletion on compacted topic - */ @Test void testDeleteRecordsCompactTopic() { Topic topicToDelete = Topic.builder() - .metadata(ObjectMeta.builder() - .name("ns1-compactTopicToDelete") - .namespace("ns1") - .build()) - .spec(TopicSpec.builder() - .partitions(3) - .replicationFactor(1) - .configs(Map.of("cleanup.policy", "compact", - "min.insync.replicas", "1", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1-compactTopicToDelete") + .namespace("ns1") + .build()) + .spec(TopicSpec.builder() + .partitions(3) + .replicationFactor(1) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "1", + "retention.ms", "60000")) + .build()) + .build(); - var response = client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics").bearerAuth(token).body(topicToDelete)); + var response = client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics").bearerAuth(token).body(topicToDelete)); assertEquals("created", response.header("X-Ns4kafka-Result")); topicAsyncExecutorList.forEach(TopicAsyncExecutor::run); HttpClientResponseException exception = assertThrows(HttpClientResponseException.class, - () -> client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST,"/api/namespaces/ns1/topics/compactTopicToDelete/delete-records") - .bearerAuth(token))); + () -> client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/topics/compactTopicToDelete/delete-records") + .bearerAuth(token))); assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, exception.getStatus()); } - /** - * Bearer token class - */ @Data @NoArgsConstructor @AllArgsConstructor public static class BearerAccessRefreshToken { - /** - * The username - */ private String username; - - /** - * The roles - */ private Collection roles; - - /** - * The access token - */ @JsonProperty("access_token") private String accessToken; - - /** - * The token type - */ @JsonProperty("token_type") private String tokenType; - - /** - * The expires in - */ @JsonProperty("expires_in") private Integer expiresIn; } diff --git a/src/test/java/com/michelin/ns4kafka/integration/UserTest.java b/src/test/java/com/michelin/ns4kafka/integration/UserTest.java index 57d7fd55..6ced99fa 100644 --- a/src/test/java/com/michelin/ns4kafka/integration/UserTest.java +++ b/src/test/java/com/michelin/ns4kafka/integration/UserTest.java @@ -1,5 +1,9 @@ package com.michelin.ns4kafka.integration; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + import com.michelin.ns4kafka.models.KafkaUserResetPassword; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; @@ -18,6 +22,9 @@ import io.micronaut.security.authentication.UsernamePasswordCredentials; import io.micronaut.test.extensions.junit5.annotation.MicronautTest; 
import jakarta.inject.Inject; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; import org.apache.kafka.clients.admin.ScramMechanism; import org.apache.kafka.clients.admin.UserScramCredentialsDescription; import org.apache.kafka.common.quota.ClientQuotaEntity; @@ -27,12 +34,6 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; - -import static org.junit.jupiter.api.Assertions.*; - @MicronautTest @Property(name = "micronaut.security.gitlab.enabled", value = "false") class UserTest extends AbstractIntegrationTest { @@ -48,63 +49,71 @@ class UserTest extends AbstractIntegrationTest { @BeforeAll void init() { Namespace ns1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns1") - .cluster("test-cluster") - .labels(Map.of("support-group", "LDAP-GROUP-1")) - .build()) - .spec(Namespace.NamespaceSpec.builder() - .kafkaUser("user1") - .connectClusters(List.of("test-connect")) - .topicValidator(TopicValidator.makeDefaultOneBroker()) - .build()) - .build(); - Namespace ns2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns2") - .cluster("test-cluster") - .labels(Map.of("support-group", "LDAP-GROUP-2")) - .build()) - .spec(Namespace.NamespaceSpec.builder() - .kafkaUser("user2") - .connectClusters(List.of("test-connect")) - .topicValidator(TopicValidator.makeDefaultOneBroker()) - .build()) - .build(); - Namespace ns3 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("ns3") - .cluster("test-cluster") - .labels(Map.of("support-group", "LDAP-GROUP-3")) - .build()) - .spec(Namespace.NamespaceSpec.builder() - .kafkaUser("user3") - .connectClusters(List.of("test-connect")) - .topicValidator(TopicValidator.makeDefaultOneBroker()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("ns1") + .cluster("test-cluster") + .labels(Map.of("support-group", "LDAP-GROUP-1")) + .build()) + .spec(Namespace.NamespaceSpec.builder() + .kafkaUser("user1") + .connectClusters(List.of("test-connect")) + .topicValidator(TopicValidator.makeDefaultOneBroker()) + .build()) + .build(); - ResourceQuota rqNs2 = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .name("rqNs2") - .namespace("ns2") - .build()) - .spec(Map.of( - ResourceQuota.ResourceQuotaSpecKey.USER_PRODUCER_BYTE_RATE.getKey(), "204800.0", - ResourceQuota.ResourceQuotaSpecKey.USER_CONSUMER_BYTE_RATE.getKey(), "409600.0")) - .build(); + Namespace ns2 = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("ns2") + .cluster("test-cluster") + .labels(Map.of("support-group", "LDAP-GROUP-2")) + .build()) + .spec(Namespace.NamespaceSpec.builder() + .kafkaUser("user2") + .connectClusters(List.of("test-connect")) + .topicValidator(TopicValidator.makeDefaultOneBroker()) + .build()) + .build(); UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); - HttpResponse response = client.toBlocking().exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class); + HttpResponse response = client.toBlocking() + .exchange(HttpRequest.POST("/login", credentials), TopicTest.BearerAccessRefreshToken.class); token = response.getBody().get().getAccessToken(); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1)); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns1)); - 
client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns2)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns2/resource-quotas").bearerAuth(token).body(rqNs2)); + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns2)); - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns3)); + ResourceQuota rqNs2 = ResourceQuota.builder() + .metadata(ObjectMeta.builder() + .name("rqNs2") + .namespace("ns2") + .build()) + .spec(Map.of( + ResourceQuota.ResourceQuotaSpecKey.USER_PRODUCER_BYTE_RATE.getKey(), "204800.0", + ResourceQuota.ResourceQuotaSpecKey.USER_CONSUMER_BYTE_RATE.getKey(), "409600.0")) + .build(); + + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns2/resource-quotas").bearerAuth(token).body(rqNs2)); + + Namespace ns3 = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("ns3") + .cluster("test-cluster") + .labels(Map.of("support-group", "LDAP-GROUP-3")) + .build()) + .spec(Namespace.NamespaceSpec.builder() + .kafkaUser("user3") + .connectClusters(List.of("test-connect")) + .topicValidator(TopicValidator.makeDefaultOneBroker()) + .build()) + .build(); + + client.toBlocking() + .exchange(HttpRequest.create(HttpMethod.POST, "/api/namespaces").bearerAuth(token).body(ns3)); //force User Sync userAsyncExecutors.forEach(UserAsyncExecutor::run); @@ -114,9 +123,9 @@ void init() { @Test void checkDefaultQuotas() throws ExecutionException, InterruptedException { Map> mapQuota = getAdminClient() - .describeClientQuotas(ClientQuotaFilter.containsOnly( - List.of(ClientQuotaFilterComponent.ofEntity("user", "user1"))) - ).entities().get(); + .describeClientQuotas(ClientQuotaFilter.containsOnly( + List.of(ClientQuotaFilterComponent.ofEntity("user", "user1"))) + ).entities().get(); assertEquals(1, mapQuota.entrySet().size()); Map quotas = mapQuota.entrySet().stream().findFirst().get().getValue(); @@ -125,12 +134,13 @@ void checkDefaultQuotas() throws ExecutionException, InterruptedException { assertTrue(quotas.containsKey("consumer_byte_rate")); assertEquals(102400.0, quotas.get("consumer_byte_rate")); } + @Test void checkCustomQuotas() throws ExecutionException, InterruptedException { Map> mapQuota = getAdminClient() - .describeClientQuotas(ClientQuotaFilter.containsOnly( - List.of(ClientQuotaFilterComponent.ofEntity("user", "user2"))) - ).entities().get(); + .describeClientQuotas(ClientQuotaFilter.containsOnly( + List.of(ClientQuotaFilterComponent.ofEntity("user", "user2"))) + ).entities().get(); assertEquals(1, mapQuota.entrySet().size()); Map quotas = mapQuota.entrySet().stream().findFirst().get().getValue(); @@ -139,28 +149,30 @@ void checkCustomQuotas() throws ExecutionException, InterruptedException { assertTrue(quotas.containsKey("consumer_byte_rate")); assertEquals(409600.0, quotas.get("consumer_byte_rate")); } + @Test void checkUpdateQuotas() throws ExecutionException, InterruptedException { // Update the namespace user quotas ResourceQuota rq3 = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .name("rqNs3") - .namespace("ns3") - .build()) - .spec(Map.of( - ResourceQuota.ResourceQuotaSpecKey.USER_PRODUCER_BYTE_RATE.getKey(), "204800.0", - ResourceQuota.ResourceQuotaSpecKey.USER_CONSUMER_BYTE_RATE.getKey(), "409600.0")) - .build(); - - client.toBlocking().exchange(HttpRequest.create(HttpMethod.POST, 
"/api/namespaces/ns3/resource-quotas").bearerAuth(token).body(rq3)); + .metadata(ObjectMeta.builder() + .name("rqNs3") + .namespace("ns3") + .build()) + .spec(Map.of( + ResourceQuota.ResourceQuotaSpecKey.USER_PRODUCER_BYTE_RATE.getKey(), "204800.0", + ResourceQuota.ResourceQuotaSpecKey.USER_CONSUMER_BYTE_RATE.getKey(), "409600.0")) + .build(); + + client.toBlocking().exchange( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns3/resource-quotas").bearerAuth(token).body(rq3)); // Force user sync to force the quota update userAsyncExecutors.forEach(UserAsyncExecutor::run); Map> mapQuota = getAdminClient() - .describeClientQuotas(ClientQuotaFilter.containsOnly( - List.of(ClientQuotaFilterComponent.ofEntity("user", "user3"))) - ).entities().get(); + .describeClientQuotas(ClientQuotaFilter.containsOnly( + List.of(ClientQuotaFilterComponent.ofEntity("user", "user3"))) + ).entities().get(); assertEquals(1, mapQuota.entrySet().size()); Map quotas = mapQuota.entrySet().stream().findFirst().get().getValue(); @@ -172,10 +184,12 @@ void checkUpdateQuotas() throws ExecutionException, InterruptedException { @Test void createAndUpdateUserForceTest() throws ExecutionException, InterruptedException { - KafkaUserResetPassword response = client.toBlocking().retrieve(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/users/user1/reset-password").bearerAuth(token), KafkaUserResetPassword.class); + KafkaUserResetPassword response = client.toBlocking().retrieve( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/users/user1/reset-password").bearerAuth(token), + KafkaUserResetPassword.class); Map mapUser = getAdminClient() - .describeUserScramCredentials(List.of("user1")).all().get(); + .describeUserScramCredentials(List.of("user1")).all().get(); Assertions.assertNotNull(response.getSpec().getNewPassword()); assertTrue(mapUser.containsKey("user1")); @@ -185,9 +199,13 @@ void createAndUpdateUserForceTest() throws ExecutionException, InterruptedExcept @Test void updateUserFail_NotMatching() { - HttpClientResponseException exception = assertThrows(HttpClientResponseException.class, () -> client.toBlocking().retrieve(HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/users/user2/reset-password").bearerAuth(token), KafkaUserResetPassword.class)); + HttpClientResponseException exception = assertThrows(HttpClientResponseException.class, + () -> client.toBlocking().retrieve( + HttpRequest.create(HttpMethod.POST, "/api/namespaces/ns1/users/user2/reset-password").bearerAuth(token), + KafkaUserResetPassword.class)); assertEquals(HttpStatus.UNPROCESSABLE_ENTITY, exception.getStatus()); - assertEquals("Invalid user user2 : Doesn't belong to namespace ns1", exception.getResponse().getBody(Status.class).get().getDetails().getCauses().get(0)); + assertEquals("Invalid user user2 : Doesn't belong to namespace ns1", + exception.getResponse().getBody(Status.class).get().getDetails().getCauses().get(0)); } } diff --git a/src/test/java/com/michelin/ns4kafka/models/AccessControlEntryTest.java b/src/test/java/com/michelin/ns4kafka/models/AccessControlEntryTest.java index f39bd47c..d8a20401 100644 --- a/src/test/java/com/michelin/ns4kafka/models/AccessControlEntryTest.java +++ b/src/test/java/com/michelin/ns4kafka/models/AccessControlEntryTest.java @@ -1,88 +1,96 @@ package com.michelin.ns4kafka.models; -import org.junit.jupiter.api.Test; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import org.junit.jupiter.api.Test; + +/** + * 
Access control entry test. + */ class AccessControlEntryTest { @Test void testEquals() { AccessControlEntry original = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resource("resource1") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .grantedTo("other1") - .permission(AccessControlEntry.Permission.OWNER) - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resource("resource1") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .grantedTo("other1") + .permission(AccessControlEntry.Permission.OWNER) + .build()) + .build(); AccessControlEntry same = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resource("resource1") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .grantedTo("other1") - .permission(AccessControlEntry.Permission.OWNER) - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resource("resource1") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .grantedTo("other1") + .permission(AccessControlEntry.Permission.OWNER) + .build()) + .build(); + + assertEquals(original, same); AccessControlEntry differentByResource = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resource("resource2") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .grantedTo("other1") - .permission(AccessControlEntry.Permission.OWNER) - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resource("resource2") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .grantedTo("other1") + .permission(AccessControlEntry.Permission.OWNER) + .build()) + .build(); + + assertNotEquals(original, differentByResource); AccessControlEntry differentByResourcePatternType = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resource("resource1") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .grantedTo("other1") - .permission(AccessControlEntry.Permission.OWNER) - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resource("resource1") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .grantedTo("other1") + .permission(AccessControlEntry.Permission.OWNER) + .build()) + .build(); + + assertNotEquals(original, differentByResourcePatternType); AccessControlEntry differentByResourceType = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resource("resource1") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .grantedTo("other1") - .permission(AccessControlEntry.Permission.OWNER) - .build()) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resource("resource1") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + 
+                .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                .grantedTo("other1")
+                .permission(AccessControlEntry.Permission.OWNER)
+                .build())
+            .build();
+
+        assertNotEquals(original, differentByResourceType);
 
         AccessControlEntry differentByGrantedTo = AccessControlEntry.builder()
-                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                        .resource("resource1")
-                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
-                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
-                        .grantedTo("other2")
-                        .permission(AccessControlEntry.Permission.OWNER)
-                        .build())
-                .build();
+            .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                .resource("resource1")
+                .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
+                .resourceType(AccessControlEntry.ResourceType.TOPIC)
+                .grantedTo("other2")
+                .permission(AccessControlEntry.Permission.OWNER)
+                .build())
+            .build();
+
+        assertNotEquals(original, differentByGrantedTo);
 
         AccessControlEntry differentByPermission = AccessControlEntry.builder()
-                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                        .resource("resource1")
-                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
-                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
-                        .grantedTo("other1")
-                        .permission(AccessControlEntry.Permission.READ)
-                        .build())
-                .build();
+            .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                .resource("resource1")
+                .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
+                .resourceType(AccessControlEntry.ResourceType.TOPIC)
+                .grantedTo("other1")
+                .permission(AccessControlEntry.Permission.READ)
+                .build())
+            .build();
 
-        assertEquals(original, same);
-        assertNotEquals(original, differentByResource);
-        assertNotEquals(original, differentByResourcePatternType);
-        assertNotEquals(original, differentByResourceType);
-        assertNotEquals(original, differentByGrantedTo);
         assertNotEquals(original, differentByPermission);
     }
 }
diff --git a/src/test/java/com/michelin/ns4kafka/models/ConnectValidatorTest.java b/src/test/java/com/michelin/ns4kafka/models/ConnectValidatorTest.java
index 32183ed6..3fea0d39 100644
--- a/src/test/java/com/michelin/ns4kafka/models/ConnectValidatorTest.java
+++ b/src/test/java/com/michelin/ns4kafka/models/ConnectValidatorTest.java
@@ -1,144 +1,147 @@
 package com.michelin.ns4kafka.models;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 import com.michelin.ns4kafka.models.connector.Connector;
 import com.michelin.ns4kafka.validation.ConnectValidator;
 import com.michelin.ns4kafka.validation.ResourceValidator;
-import org.junit.jupiter.api.Test;
-
 import java.util.List;
 import java.util.Map;
-
-import static org.junit.jupiter.api.Assertions.*;
+import org.junit.jupiter.api.Test;
 
 class ConnectValidatorTest {
     @Test
-    void testEquals(){
+    void testEquals() {
         ConnectValidator original = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of(
-                        "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .sinkValidationConstraints(Map.of(
-                        "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .classValidationConstraints(Map.of(
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString(),
+                "value.converter", new ResourceValidator.NonEmptyString(),
+                "connector.class", new ResourceValidator.ValidString(
+                    List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
                         "io.confluent.connect.jdbc.JdbcSinkConnector",
-                        Map.of("db.timezone", new ResourceValidator.NonEmptyString())))
-                .build();
+                        "com.splunk.kafka.connect.SplunkSinkConnector",
+                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
+                    false)))
+            .sourceValidationConstraints(Map.of(
+                "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .sinkValidationConstraints(Map.of(
+                "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .classValidationConstraints(Map.of(
+                "io.confluent.connect.jdbc.JdbcSinkConnector",
+                Map.of("db.timezone", new ResourceValidator.NonEmptyString())))
+            .build();
 
         ConnectValidator same = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of(
-                        "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .sinkValidationConstraints(Map.of(
-                        "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .classValidationConstraints(Map.of(
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString(),
+                "value.converter", new ResourceValidator.NonEmptyString(),
+                "connector.class", new ResourceValidator.ValidString(
+                    List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
                         "io.confluent.connect.jdbc.JdbcSinkConnector",
-                        Map.of(
-                                "db.timezone", new ResourceValidator.NonEmptyString())))
-                .build();
+                        "com.splunk.kafka.connect.SplunkSinkConnector",
+                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
+                    false)))
+            .sourceValidationConstraints(Map.of(
+                "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .sinkValidationConstraints(Map.of(
+                "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .classValidationConstraints(Map.of(
+                "io.confluent.connect.jdbc.JdbcSinkConnector",
+                Map.of(
+                    "db.timezone", new ResourceValidator.NonEmptyString())))
+            .build();
 
         ConnectValidator differentByGeneralRules = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString()))
-                .sourceValidationConstraints(Map.of(
-                        "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .sinkValidationConstraints(Map.of(
-                        "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .classValidationConstraints(Map.of(
-                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                        Map.of(
-                                "db.timezone", new ResourceValidator.NonEmptyString())))
-                .build();
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString()))
+            .sourceValidationConstraints(Map.of(
+                "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .sinkValidationConstraints(Map.of(
+                "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .classValidationConstraints(Map.of(
+                "io.confluent.connect.jdbc.JdbcSinkConnector",
+                Map.of(
+                    "db.timezone", new ResourceValidator.NonEmptyString())))
+            .build();
 
         ConnectValidator differentBySourceRules = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of())
-                .sinkValidationConstraints(Map.of(
-                        "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .classValidationConstraints(Map.of(
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString(),
+                "value.converter", new ResourceValidator.NonEmptyString(),
+                "connector.class", new ResourceValidator.ValidString(
+                    List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
                         "io.confluent.connect.jdbc.JdbcSinkConnector",
-                        Map.of(
-                                "db.timezone", new ResourceValidator.NonEmptyString())))
-                .build();
+                        "com.splunk.kafka.connect.SplunkSinkConnector",
+                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
+                    false)))
+            .sourceValidationConstraints(Map.of())
+            .sinkValidationConstraints(Map.of(
+                "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .classValidationConstraints(Map.of(
+                "io.confluent.connect.jdbc.JdbcSinkConnector",
+                Map.of(
+                    "db.timezone", new ResourceValidator.NonEmptyString())))
+            .build();
+
+        assertEquals(original, same);
+        assertNotEquals(original, differentByGeneralRules);
+        assertNotEquals(original, differentBySourceRules);
 
         ConnectValidator differentBySinkRules = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of(
-                        "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .sinkValidationConstraints(Map.of())
-                .classValidationConstraints(Map.of(
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString(),
+                "value.converter", new ResourceValidator.NonEmptyString(),
+                "connector.class", new ResourceValidator.ValidString(
+                    List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
                         "io.confluent.connect.jdbc.JdbcSinkConnector",
-                        Map.of(
-                                "db.timezone", new ResourceValidator.NonEmptyString())))
-                .build();
+                        "com.splunk.kafka.connect.SplunkSinkConnector",
+                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
+                    false)))
+            .sourceValidationConstraints(Map.of(
+                "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .sinkValidationConstraints(Map.of())
+            .classValidationConstraints(Map.of(
+                "io.confluent.connect.jdbc.JdbcSinkConnector",
+                Map.of(
+                    "db.timezone", new ResourceValidator.NonEmptyString())))
+            .build();
+
+        assertNotEquals(original, differentBySinkRules);
 
         ConnectValidator differentByClassRules = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of(
-                        "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .sinkValidationConstraints(Map.of(
-                        "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .classValidationConstraints(Map.of(
-                        "io.confluent.connect.jdbc.JdbcSinkConnector_oops", // <<<<< here oops
-                        Map.of(
-                                "db.timezone", new ResourceValidator.NonEmptyString())))
-                .build();
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString(),
+                "value.converter", new ResourceValidator.NonEmptyString(),
+                "connector.class", new ResourceValidator.ValidString(
+                    List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
+                        "io.confluent.connect.jdbc.JdbcSinkConnector",
+                        "com.splunk.kafka.connect.SplunkSinkConnector",
+                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
+                    false)))
+            .sourceValidationConstraints(Map.of(
+                "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .sinkValidationConstraints(Map.of(
+                "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .classValidationConstraints(Map.of(
+                "io.confluent.connect.jdbc.JdbcSinkConnector_oops", // <<<<< here oops
+                Map.of(
+                    "db.timezone", new ResourceValidator.NonEmptyString())))
+            .build();
 
-        assertEquals(original, same);
-        assertNotEquals(original, differentByGeneralRules);
-        assertNotEquals(original, differentBySourceRules);
-        assertNotEquals(original, differentBySinkRules);
         assertNotEquals(original, differentByClassRules);
     }
 
     @Test
     void shouldNotValidateConnectorWithNoName() {
         ConnectValidator validator = ConnectValidator.builder()
-                .build();
+            .build();
 
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .build())
+            .build();
 
         List<String> actual = validator.validate(connector, "sink");
         assertEquals(1, actual.size());
@@ -148,59 +151,75 @@ void shouldNotValidateConnectorWithNoName() {
 
     @Test
     void shouldNotValidateConnectorBecauseNameLengthAndSpecialChars() {
         ConnectValidator validator = ConnectValidator.builder()
-                .build();
+            .build();
 
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("$thisNameIsDefinitelyToLooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooongToBeAConnectorName$")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("cluster1")
-                        .config(Map.of(
-                                "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector",
-                                "key.converter", "test",
-                                "value.converter", "test",
-                                "consumer.override.sasl.jaas.config", "test"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name(
+                    "$thisNameIsDefinitelyToLooooooooooooooooooooooooooooooooooooooooooo"
+                        + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo"
+                        + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo"
+                        + "oooooooooooooooooooooooooooooooooooooooongToBeAConnectorName$")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("cluster1")
+                .config(Map.of(
+                    "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector",
+                    "key.converter", "test",
+                    "value.converter", "test",
+                    "consumer.override.sasl.jaas.config", "test"))
+                .build())
+            .build();
 
         List<String> actual = validator.validate(connector, "sink");
         assertEquals(2, actual.size());
-        assertEquals("Invalid value $thisNameIsDefinitelyToLooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooongToBeAConnectorName$ for name: Value must not be longer than 249", actual.get(0));
-        assertEquals("Invalid value $thisNameIsDefinitelyToLooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooongToBeAConnectorName$ for name: Value must only contain ASCII alphanumerics, '.', '_' or '-'", actual.get(1));
+        assertEquals(
+            "Invalid value $thisNameIsDefinitelyToLoooooooooooooooo"
+                + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooo"
+                + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooo"
+                + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooo"
+                + "ooooooooooooooongToBeAConnectorName$ for name: Value must not be longer than 249",
            actual.get(0));
+        assertEquals(
+            "Invalid value $thisNameIsDefinitelyToLooooooooooooooooooooooo"
+                + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo"
+                + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo"
+                + "oooooooooooooooooooooooooooooooooooooooooooooooooooongToBeAConnectorName$ for name: "
+                + "Value must only contain ASCII alphanumerics, '.', '_' or '-'",
+            actual.get(1));
     }
 
     @Test
     void shouldValidateWithNoClassValidationConstraint() {
         ConnectValidator validator = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of(
-                        "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .sinkValidationConstraints(Map.of(
-                        "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .build();
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString(),
+                "value.converter", new ResourceValidator.NonEmptyString(),
+                "connector.class", new ResourceValidator.ValidString(
+                    List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
+                        "io.confluent.connect.jdbc.JdbcSinkConnector",
+                        "com.splunk.kafka.connect.SplunkSinkConnector",
+                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
+                    false)))
+            .sourceValidationConstraints(Map.of(
+                "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .sinkValidationConstraints(Map.of(
+                "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .build();
 
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("connect2")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("cluster1")
-                        .config(Map.of(
-                                "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector",
-                                "key.converter", "test",
-                                "value.converter", "test",
-                                "consumer.override.sasl.jaas.config", "test"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("connect2")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("cluster1")
+                .config(Map.of(
+                    "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector",
+                    "key.converter", "test",
+                    "value.converter", "test",
+                    "consumer.override.sasl.jaas.config", "test"))
+                .build())
+            .build();
 
         List<String> actual = validator.validate(connector, "sink");
         assertTrue(actual.isEmpty());
@@ -209,31 +228,31 @@ void shouldValidateWithNoClassValidationConstraint() {
 
     @Test
     void shouldValidateWithNoSinkValidationConstraint() {
         ConnectValidator validator = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of(
-                        "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .build();
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString(),
+                "value.converter", new ResourceValidator.NonEmptyString(),
+                "connector.class", new ResourceValidator.ValidString(
+                    List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
+                        "io.confluent.connect.jdbc.JdbcSinkConnector",
+                        "com.splunk.kafka.connect.SplunkSinkConnector",
+                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
+                    false)))
+            .sourceValidationConstraints(Map.of(
+                "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .build();
 
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("connect2")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("cluster1")
-                        .config(Map.of(
-                                "connector.class", "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                "key.converter", "test",
-                                "value.converter", "test"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("connect2")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("cluster1")
+                .config(Map.of(
+                    "connector.class", "io.confluent.connect.jdbc.JdbcSinkConnector",
+                    "key.converter", "test",
+                    "value.converter", "test"))
+                .build())
+            .build();
 
         List<String> actual = validator.validate(connector, "sink");
         assertTrue(actual.isEmpty());
@@ -242,20 +261,20 @@ void shouldValidateWithNoSinkValidationConstraint() {
 
     @Test
     void shouldValidateWithNoValidationConstraint() {
         ConnectValidator validator = ConnectValidator.builder()
-                .build();
+            .build();
 
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("connect2")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("cluster1")
-                        .config(Map.of(
-                                "connector.class", "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                "key.converter", "test",
-                                "value.converter", "test"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("connect2")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("cluster1")
+                .config(Map.of(
+                    "connector.class", "io.confluent.connect.jdbc.JdbcSinkConnector",
+                    "key.converter", "test",
+                    "value.converter", "test"))
+                .build())
+            .build();
 
         List<String> actual = validator.validate(connector, "sink");
         assertTrue(actual.isEmpty());
@@ -264,38 +283,38 @@ void shouldValidateWithNoValidationConstraint() {
 
     @Test
     void shouldValidateSourceConnector() {
         ConnectValidator validator = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of(
-                        "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .sinkValidationConstraints(Map.of(
-                        "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
-                .classValidationConstraints(Map.of(
+            .validationConstraints(Map.of(
+                "key.converter", new ResourceValidator.NonEmptyString(),
+                "value.converter", new ResourceValidator.NonEmptyString(),
+                "connector.class", new ResourceValidator.ValidString(
+                    List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
                         "io.confluent.connect.jdbc.JdbcSinkConnector",
-                        Map.of(
-                                "db.timezone", new ResourceValidator.NonEmptyString())))
-                .build();
+                        "com.splunk.kafka.connect.SplunkSinkConnector",
+                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
+                    false)))
+            .sourceValidationConstraints(Map.of(
+                "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .sinkValidationConstraints(Map.of(
+                "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString()))
+            .classValidationConstraints(Map.of(
+                "io.confluent.connect.jdbc.JdbcSinkConnector",
+                Map.of(
+                    "db.timezone", new ResourceValidator.NonEmptyString())))
+            .build();
 
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("connect2")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("cluster1")
-                        .config(Map.of(
-                                "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector",
-                                "key.converter", "test",
-                                "value.converter", "test",
-                                "producer.override.sasl.jaas.config", "test"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("connect2")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("cluster1")
+                .config(Map.of(
+                    "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector",
+                    "key.converter", "test",
+                    "value.converter", "test",
+                    "producer.override.sasl.jaas.config", "test"))
+                .build())
+            .build();
 
         List<String> actual = validator.validate(connector, "source");
         assertTrue(actual.isEmpty());
@@ -304,81 +323,83 @@ void shouldValidateSourceConnector() {
 
     @Test
     void shouldNotValidateSourceConnector() {
         ConnectValidator validator = ConnectValidator.builder()
-                .validationConstraints(Map.of(
-                        "key.converter", new ResourceValidator.NonEmptyString(),
-                        "value.converter", new ResourceValidator.NonEmptyString(),
-                        "connector.class", new ResourceValidator.ValidString(
-                                List.of("io.confluent.connect.jdbc.JdbcSourceConnector",
-                                        "io.confluent.connect.jdbc.JdbcSinkConnector",
-                                        "com.splunk.kafka.connect.SplunkSinkConnector",
-                                        "org.apache.kafka.connect.file.FileStreamSinkConnector"),
-                                false)))
-                .sourceValidationConstraints(Map.of(
"producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString())) - .sinkValidationConstraints(Map.of( - "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString())) - .classValidationConstraints(Map.of( + .validationConstraints(Map.of( + "key.converter", new ResourceValidator.NonEmptyString(), + "value.converter", new ResourceValidator.NonEmptyString(), + "connector.class", new ResourceValidator.ValidString( + List.of("io.confluent.connect.jdbc.JdbcSourceConnector", "io.confluent.connect.jdbc.JdbcSinkConnector", - Map.of( - "db.timezone", new ResourceValidator.NonEmptyString()))) - .build(); + "com.splunk.kafka.connect.SplunkSinkConnector", + "org.apache.kafka.connect.file.FileStreamSinkConnector"), + false))) + .sourceValidationConstraints(Map.of( + "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString())) + .sinkValidationConstraints(Map.of( + "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString())) + .classValidationConstraints(Map.of( + "io.confluent.connect.jdbc.JdbcSinkConnector", + Map.of( + "db.timezone", new ResourceValidator.NonEmptyString()))) + .build(); Connector connector = Connector.builder() - .metadata(ObjectMeta.builder() - .name("connect2") - .build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("cluster1") - .config(Map.of( - "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector", - "key.converter", "test", - "value.converter", "test")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("connect2") + .build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("cluster1") + .config(Map.of( + "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector", + "key.converter", "test", + "value.converter", "test")) + .build()) + .build(); List actual = validator.validate(connector, "source"); assertEquals(1, actual.size()); - assertEquals("Invalid value null for configuration producer.override.sasl.jaas.config: Value must be non-null", actual.get(0)); + assertEquals("Invalid value null for configuration producer.override.sasl.jaas.config: Value must be non-null", + actual.get(0)); } @Test void shouldNotValidateSinkConnector() { ConnectValidator validator = ConnectValidator.builder() - .validationConstraints(Map.of( - "key.converter", new ResourceValidator.NonEmptyString(), - "value.converter", new ResourceValidator.NonEmptyString(), - "connector.class", new ResourceValidator.ValidString( - List.of("io.confluent.connect.jdbc.JdbcSourceConnector", - "io.confluent.connect.jdbc.JdbcSinkConnector", - "com.splunk.kafka.connect.SplunkSinkConnector", - "org.apache.kafka.connect.file.FileStreamSinkConnector"), - false))) - .sourceValidationConstraints(Map.of( - "producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString())) - .sinkValidationConstraints(Map.of( - "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString())) - .classValidationConstraints(Map.of( + .validationConstraints(Map.of( + "key.converter", new ResourceValidator.NonEmptyString(), + "value.converter", new ResourceValidator.NonEmptyString(), + "connector.class", new ResourceValidator.ValidString( + List.of("io.confluent.connect.jdbc.JdbcSourceConnector", "io.confluent.connect.jdbc.JdbcSinkConnector", - Map.of( - "db.timezone", new ResourceValidator.NonEmptyString()))) - .build(); + "com.splunk.kafka.connect.SplunkSinkConnector", + "org.apache.kafka.connect.file.FileStreamSinkConnector"), + false))) + .sourceValidationConstraints(Map.of( + 
"producer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString())) + .sinkValidationConstraints(Map.of( + "consumer.override.sasl.jaas.config", new ResourceValidator.NonEmptyString())) + .classValidationConstraints(Map.of( + "io.confluent.connect.jdbc.JdbcSinkConnector", + Map.of( + "db.timezone", new ResourceValidator.NonEmptyString()))) + .build(); Connector connector = Connector.builder() - .metadata(ObjectMeta.builder() - .name("connect2") - .build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("cluster1") - .config(Map.of( - "connector.class", "io.confluent.connect.jdbc.JdbcSinkConnector", - "key.converter", "test", - "value.converter", "test")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("connect2") + .build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("cluster1") + .config(Map.of( + "connector.class", "io.confluent.connect.jdbc.JdbcSinkConnector", + "key.converter", "test", + "value.converter", "test")) + .build()) + .build(); List actual = validator.validate(connector, "sink"); assertEquals(2, actual.size()); - assertTrue(actual.contains("Invalid value null for configuration consumer.override.sasl.jaas.config: Value must be non-null")); + assertTrue(actual.contains( + "Invalid value null for configuration consumer.override.sasl.jaas.config: Value must be non-null")); assertTrue(actual.contains("Invalid value null for configuration db.timezone: Value must be non-null")); } } diff --git a/src/test/java/com/michelin/ns4kafka/models/ConnectorTest.java b/src/test/java/com/michelin/ns4kafka/models/ConnectorTest.java index 1ae56fb1..a179af34 100644 --- a/src/test/java/com/michelin/ns4kafka/models/ConnectorTest.java +++ b/src/test/java/com/michelin/ns4kafka/models/ConnectorTest.java @@ -1,82 +1,82 @@ package com.michelin.ns4kafka.models; -import com.michelin.ns4kafka.models.connector.Connector; -import org.junit.jupiter.api.Test; - -import java.util.Map; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import com.michelin.ns4kafka.models.connector.Connector; +import java.util.Map; +import org.junit.jupiter.api.Test; + class ConnectorTest { @Test void testEquals() { Connector original = Connector.builder() - .metadata(ObjectMeta.builder() - .name("connect1") - .build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("cluster1") - .config(Map.of("k1", "v1", - "k2", "v2")) - .build()) - .status(Connector.ConnectorStatus.builder() - .state(Connector.TaskState.RUNNING) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("connect1") + .build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("cluster1") + .config(Map.of("k1", "v1", + "k2", "v2")) + .build()) + .status(Connector.ConnectorStatus.builder() + .state(Connector.TaskState.RUNNING) + .build()) + .build(); Connector same = Connector.builder() - .metadata(ObjectMeta.builder() - .name("connect1") - .build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("cluster1") - // inverted map - .config(Map.of("k2", "v2", - "k1", "v1")) - .build()) - // different status - .status(Connector.ConnectorStatus.builder() - .state(Connector.TaskState.FAILED) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("connect1") + .build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("cluster1") + // inverted map + .config(Map.of("k2", "v2", + "k1", "v1")) + .build()) + // different status + .status(Connector.ConnectorStatus.builder() + 
+                .state(Connector.TaskState.FAILED)
+                .build())
+            .build();
 
         Connector differentByConnectCluster = Connector.builder()
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("cluster2")
-                        .config(Map.of("k1", "v1",
-                                "k2", "v2"))
-                        .build())
-                .build();
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("cluster2")
+                .config(Map.of("k1", "v1",
+                    "k2", "v2"))
+                .build())
+            .build();
 
         Connector differentByConfig = Connector.builder()
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("cluster2")
-                        .config(Map.of("k1", "v1",
-                                "k2", "v2",
-                                "k3", "v3"))
-                        .build())
-                .build();
-
-        Connector differentByMetadata = Connector.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("connect2")
-                        .build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("cluster1")
-                        .config(Map.of("k1", "v1",
-                                "k2", "v2"))
-                        .build())
-                .status(Connector.ConnectorStatus.builder()
-                        .state(Connector.TaskState.RUNNING)
-                        .build())
-                .build();
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("cluster2")
+                .config(Map.of("k1", "v1",
+                    "k2", "v2",
+                    "k3", "v3"))
+                .build())
+            .build();
 
         // objects are same, even if status differs
         assertEquals(original, same);
         assertNotEquals(original, differentByConnectCluster);
         assertNotEquals(original, differentByConfig);
+
+        Connector differentByMetadata = Connector.builder()
+            .metadata(ObjectMeta.builder()
+                .name("connect2")
+                .build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("cluster1")
+                .config(Map.of("k1", "v1",
+                    "k2", "v2"))
+                .build())
+            .status(Connector.ConnectorStatus.builder()
+                .state(Connector.TaskState.RUNNING)
+                .build())
+            .build();
+
         assertNotEquals(original, differentByMetadata);
     }
 }
diff --git a/src/test/java/com/michelin/ns4kafka/models/NamespaceTest.java b/src/test/java/com/michelin/ns4kafka/models/NamespaceTest.java
index 7122958e..a647038f 100644
--- a/src/test/java/com/michelin/ns4kafka/models/NamespaceTest.java
+++ b/src/test/java/com/michelin/ns4kafka/models/NamespaceTest.java
@@ -1,108 +1,115 @@
 package com.michelin.ns4kafka.models;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
 import com.michelin.ns4kafka.validation.ConnectValidator;
 import com.michelin.ns4kafka.validation.TopicValidator;
-import org.junit.jupiter.api.Test;
-
 import java.util.List;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import org.junit.jupiter.api.Test;
 
 class NamespaceTest {
     @Test
     void testEquals() {
         Namespace original = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace1")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .connectClusters(List.of("connect1"))
-                        .topicValidator(TopicValidator.makeDefault())
-                        .connectValidator(ConnectValidator.makeDefault())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace1")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .connectClusters(List.of("connect1"))
+                .topicValidator(TopicValidator.makeDefault())
+                .connectValidator(ConnectValidator.makeDefault())
+                .build())
+            .build();
 
         Namespace same = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace1")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .connectClusters(List.of("connect1"))
-                        .topicValidator(TopicValidator.makeDefault())
-                        .connectValidator(ConnectValidator.makeDefault())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace1")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .connectClusters(List.of("connect1"))
+                .topicValidator(TopicValidator.makeDefault())
+                .connectValidator(ConnectValidator.makeDefault())
+                .build())
+            .build();
 
         Namespace differentByMetadata = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace2")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .connectClusters(List.of("connect1"))
-                        .topicValidator(TopicValidator.makeDefault())
-                        .connectValidator(ConnectValidator.makeDefault())
-                        .build())
-                .build();
-        Namespace differentByUser = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace1")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user2")
-                        .connectClusters(List.of("connect1"))
-                        .topicValidator(TopicValidator.makeDefault())
-                        .connectValidator(ConnectValidator.makeDefault())
-                        .build())
-                .build();
-        Namespace differentByConnectClusters = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace1")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .connectClusters(List.of("connect1","connect2"))
-                        .topicValidator(TopicValidator.makeDefault())
-                        .connectValidator(ConnectValidator.makeDefault())
-                        .build())
-                .build();
-        Namespace differentByTopicValidator = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace1")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .connectClusters(List.of("connect1"))
-                        .topicValidator(TopicValidator.builder().build())
-                        .connectValidator(ConnectValidator.makeDefault())
-                        .build())
-                .build();
-        Namespace differentByConnectValidator = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace1")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .kafkaUser("user1")
-                        .connectClusters(List.of("connect1"))
-                        .topicValidator(TopicValidator.makeDefault())
-                        .connectValidator(ConnectValidator.builder().build())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace2")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .connectClusters(List.of("connect1"))
+                .topicValidator(TopicValidator.makeDefault())
+                .connectValidator(ConnectValidator.makeDefault())
+                .build())
+            .build();
 
         assertEquals(original, same);
         assertNotEquals(original, differentByMetadata);
+
+        Namespace differentByUser = Namespace.builder()
+            .metadata(ObjectMeta.builder()
+                .name("namespace1")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user2")
+                .connectClusters(List.of("connect1"))
+                .topicValidator(TopicValidator.makeDefault())
+                .connectValidator(ConnectValidator.makeDefault())
+                .build())
+            .build();
+
+        assertNotEquals(original, differentByUser);
+
+        Namespace differentByConnectClusters = Namespace.builder()
+            .metadata(ObjectMeta.builder()
+                .name("namespace1")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .connectClusters(List.of("connect1", "connect2"))
+                .topicValidator(TopicValidator.makeDefault())
+                .connectValidator(ConnectValidator.makeDefault())
+                .build())
+            .build();
+
+        assertNotEquals(original, differentByConnectClusters);
+
+        Namespace differentByTopicValidator = Namespace.builder()
+            .metadata(ObjectMeta.builder()
+                .name("namespace1")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .connectClusters(List.of("connect1"))
+                .topicValidator(TopicValidator.builder().build())
+                .connectValidator(ConnectValidator.makeDefault())
+                .build())
+            .build();
+
+        assertNotEquals(original, differentByTopicValidator);
+
+        Namespace differentByConnectValidator = Namespace.builder()
+            .metadata(ObjectMeta.builder()
+                .name("namespace1")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .kafkaUser("user1")
+                .connectClusters(List.of("connect1"))
+                .topicValidator(TopicValidator.makeDefault())
+                .connectValidator(ConnectValidator.builder().build())
+                .build())
+            .build();
+
+        assertNotEquals(original, differentByConnectValidator);
     }
 }
diff --git a/src/test/java/com/michelin/ns4kafka/models/ObjectMetaTest.java b/src/test/java/com/michelin/ns4kafka/models/ObjectMetaTest.java
index 12f09a2e..8d92cf04 100644
--- a/src/test/java/com/michelin/ns4kafka/models/ObjectMetaTest.java
+++ b/src/test/java/com/michelin/ns4kafka/models/ObjectMetaTest.java
@@ -1,46 +1,45 @@
 package com.michelin.ns4kafka.models;
 
-import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 
 import java.time.Instant;
 import java.util.Date;
 import java.util.Map;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import org.junit.jupiter.api.Test;
 
 class ObjectMetaTest {
     @Test
     void testEquals() {
         ObjectMeta original = ObjectMeta.builder()
-                .name("name1")
-                .namespace("namespace1")
-                .cluster("local")
-                .labels(Map.of("key1", "val1",
-                        "key2", "val2"))
-                .creationTimestamp(Date.from(Instant.now()))
-                .generation(0)
-                .build();
+            .name("name1")
+            .namespace("namespace1")
+            .cluster("local")
+            .labels(Map.of("key1", "val1",
+                "key2", "val2"))
+            .creationTimestamp(Date.from(Instant.now()))
+            .generation(0)
+            .build();
 
         ObjectMeta same = ObjectMeta.builder()
-                .name("name1")
-                .namespace("namespace1")
-                .cluster("local")
-                // inverted map order
-                .labels(Map.of("key2", "val2",
-                        "key1", "val1"))
-                // different date
-                .creationTimestamp(Date.from(Instant.now().plusMillis(1000)))
-                // different gen
-                .generation(99)
-                .build();
+            .name("name1")
+            .namespace("namespace1")
+            .cluster("local")
+            // inverted map order
+            .labels(Map.of("key2", "val2",
+                "key1", "val1"))
+            // different date
+            .creationTimestamp(Date.from(Instant.now().plusMillis(1000)))
+            // different gen
+            .generation(99)
+            .build();
 
         ObjectMeta different = ObjectMeta.builder()
-                .name("name2")
-                .namespace("namespace1")
-                .cluster("local")
-                .labels(Map.of())
-                .creationTimestamp(Date.from(Instant.now()))
-                .generation(0)
-                .build();
+            .name("name2")
+            .namespace("namespace1")
+            .cluster("local")
+            .labels(Map.of())
+            .creationTimestamp(Date.from(Instant.now()))
+            .generation(0)
+            .build();
 
         assertEquals(original, same);
         assertNotEquals(original, different);
diff --git a/src/test/java/com/michelin/ns4kafka/models/ResourceValidatorTest.java b/src/test/java/com/michelin/ns4kafka/models/ResourceValidatorTest.java
index b44b1b41..0a215079 100644
--- a/src/test/java/com/michelin/ns4kafka/models/ResourceValidatorTest.java
+++ b/src/test/java/com/michelin/ns4kafka/models/ResourceValidatorTest.java
@@ -1,11 +1,14 @@
 package com.michelin.ns4kafka.models;
 
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
 import com.michelin.ns4kafka.validation.FieldValidationException;
 import com.michelin.ns4kafka.validation.ResourceValidator;
 import org.junit.jupiter.api.Test;
 
-import static org.junit.jupiter.api.Assertions.*;
-
 class ResourceValidatorTest {
     @Test
     void testNonEmptyString() {
@@ -40,10 +43,11 @@ void testRangeBetween() {
         assertDoesNotThrow(() -> original.ensureValid("k", "10"));
         assertDoesNotThrow(() -> original.ensureValid("k", "5"));
     }
+
     @Test
     void testOptionalRange() {
         // BETWEEN
-        ResourceValidator.Validator original = new ResourceValidator.Range(0,10,true);
+        ResourceValidator.Validator original = new ResourceValidator.Range(0, 10, true);
 
         // test ensureValid
         assertThrows(FieldValidationException.class, () -> original.ensureValid("k", ""));
@@ -56,6 +60,7 @@ void testOptionalRange() {
         assertDoesNotThrow(() -> original.ensureValid("k", "5"));
     }
+
     @Test
     void testRangeAtLeast() {
         ResourceValidator.Validator original = ResourceValidator.Range.atLeast(10);
@@ -95,6 +100,7 @@ void testValidString() {
         assertDoesNotThrow(() -> original.ensureValid("k", "b"));
         assertDoesNotThrow(() -> original.ensureValid("k", "c"));
     }
+
     @Test
     void testOptionalValidString() {
         ResourceValidator.Validator original = ResourceValidator.ValidString.optionalIn("a", "b", "c");
@@ -134,6 +140,7 @@ void testValidList() {
         assertDoesNotThrow(() -> original.ensureValid("k", "b,c"));
         assertDoesNotThrow(() -> original.ensureValid("k", "c,b,a"));
     }
+
     @Test
     void testOptionalValidList() {
         ResourceValidator.Validator original = ResourceValidator.ValidList.optionalIn("a", "b", "c");
diff --git a/src/test/java/com/michelin/ns4kafka/models/RoleBindingTest.java b/src/test/java/com/michelin/ns4kafka/models/RoleBindingTest.java
index c4459f65..525fd2bb 100644
--- a/src/test/java/com/michelin/ns4kafka/models/RoleBindingTest.java
+++ b/src/test/java/com/michelin/ns4kafka/models/RoleBindingTest.java
@@ -1,31 +1,30 @@
 package com.michelin.ns4kafka.models;
 
-import org.junit.jupiter.api.Test;
-
-import java.util.List;
-
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
 class RoleBindingTest {
     @Test
-    void testEquals_Role() {
+    void testEqualsRole() {
         RoleBinding.Role original = RoleBinding.Role.builder()
-                .resourceTypes(List.of("res1", "res2"))
-                .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
-                .build();
+            .resourceTypes(List.of("res1", "res2"))
+            .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
+            .build();
 
         RoleBinding.Role same = RoleBinding.Role.builder()
-                .resourceTypes(List.of("res1", "res2"))
-                .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
-                .build();
+            .resourceTypes(List.of("res1", "res2"))
+            .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
+            .build();
 
         RoleBinding.Role differentByResourceTypes = RoleBinding.Role.builder()
-                .resourceTypes(List.of("res1", "res2", "res3"))
-                .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
-                .build();
+            .resourceTypes(List.of("res1", "res2", "res3"))
+            .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
+            .build();
 
         RoleBinding.Role differentByVerbs = RoleBinding.Role.builder()
-                .resourceTypes(List.of("res1", "res2", "res3"))
-                .verbs(List.of(RoleBinding.Verb.DELETE))
-                .build();
+            .resourceTypes(List.of("res1", "res2", "res3"))
+            .verbs(List.of(RoleBinding.Verb.DELETE))
+            .build();
 
         assertEquals(original, same);
@@ -34,23 +33,23 @@ void testEquals_Role() {
     }
 
     @Test
-    void testEquals_Subject() {
+    void testEqualsSubject() {
         RoleBinding.Subject original = RoleBinding.Subject.builder()
-                .subjectName("subject1")
-                .subjectType(RoleBinding.SubjectType.GROUP)
-                .build();
+            .subjectName("subject1")
+            .subjectType(RoleBinding.SubjectType.GROUP)
+            .build();
 
         RoleBinding.Subject same = RoleBinding.Subject.builder()
-                .subjectName("subject1")
-                .subjectType(RoleBinding.SubjectType.GROUP)
-                .build();
+            .subjectName("subject1")
+            .subjectType(RoleBinding.SubjectType.GROUP)
+            .build();
 
         RoleBinding.Subject differentByName = RoleBinding.Subject.builder()
-                .subjectName("subject2")
-                .subjectType(RoleBinding.SubjectType.GROUP)
-                .build();
+            .subjectName("subject2")
+            .subjectType(RoleBinding.SubjectType.GROUP)
+            .build();
 
         RoleBinding.Subject differentByType = RoleBinding.Subject.builder()
-                .subjectName("subject1")
-                .subjectType(RoleBinding.SubjectType.USER)
-                .build();
+            .subjectName("subject1")
+            .subjectType(RoleBinding.SubjectType.USER)
+            .build();
 
         assertEquals(original, same);
@@ -59,70 +58,77 @@ void testEquals_Subject() {
     }
 
     @Test
-    void testEquals_RoleBinding() {
+    void testEqualsRoleBinding() {
         RoleBinding original = RoleBinding.builder()
-                .metadata(ObjectMeta.builder().name("rb1").build())
-                .spec(RoleBinding.RoleBindingSpec.builder()
-                        .role(RoleBinding.Role.builder()
-                                .resourceTypes(List.of("res1", "res2"))
-                                .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
-                                .build())
-                        .subject(RoleBinding.Subject.builder()
-                                .subjectName("subject1")
-                                .subjectType(RoleBinding.SubjectType.GROUP)
-                                .build())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder().name("rb1").build())
+            .spec(RoleBinding.RoleBindingSpec.builder()
+                .role(RoleBinding.Role.builder()
+                    .resourceTypes(List.of("res1", "res2"))
+                    .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
+                    .build())
+                .subject(RoleBinding.Subject.builder()
+                    .subjectName("subject1")
+                    .subjectType(RoleBinding.SubjectType.GROUP)
+                    .build())
+                .build())
+            .build();
+
         RoleBinding same = RoleBinding.builder()
-                .metadata(ObjectMeta.builder().name("rb1").build())
-                .spec(RoleBinding.RoleBindingSpec.builder()
-                        .role(RoleBinding.Role.builder()
-                                .resourceTypes(List.of("res1", "res2"))
-                                .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
-                                .build())
-                        .subject(RoleBinding.Subject.builder()
-                                .subjectName("subject1")
-                                .subjectType(RoleBinding.SubjectType.GROUP)
-                                .build())
-                        .build())
-                .build();
-        RoleBinding differentByMetadata = RoleBinding.builder()
-                .metadata(ObjectMeta.builder().name("rb1").cluster("cluster").build())
-                .spec(RoleBinding.RoleBindingSpec.builder()
-                        .role(RoleBinding.Role.builder()
-                                .resourceTypes(List.of("res1", "res2"))
-                                .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
-                                .build())
-                        .subject(RoleBinding.Subject.builder()
-                                .subjectName("subject1")
-                                .subjectType(RoleBinding.SubjectType.GROUP)
-                                .build())
-                        .build())
-                .build();
-        RoleBinding differentByRole = RoleBinding.builder()
-                .metadata(ObjectMeta.builder().name("rb1").build())
-                .spec(RoleBinding.RoleBindingSpec.builder()
-                        .role(RoleBinding.Role.builder().build())
-                        .subject(RoleBinding.Subject.builder()
-                                .subjectName("subject1")
-                                .subjectType(RoleBinding.SubjectType.GROUP)
-                                .build())
-                        .build())
-                .build();
-        RoleBinding differentBySubject = RoleBinding.builder()
-                .metadata(ObjectMeta.builder().name("rb1").build())
-                .spec(RoleBinding.RoleBindingSpec.builder()
-                        .role(RoleBinding.Role.builder()
-                                .resourceTypes(List.of("res1", "res2"))
-                                .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
-                                .build())
-                        .subject(RoleBinding.Subject.builder().build())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder().name("rb1").build())
+            .spec(RoleBinding.RoleBindingSpec.builder()
+                .role(RoleBinding.Role.builder()
+                    .resourceTypes(List.of("res1", "res2"))
+                    .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
+                    .build())
+                .subject(RoleBinding.Subject.builder()
+                    .subjectName("subject1")
+                    .subjectType(RoleBinding.SubjectType.GROUP)
+                    .build())
+                .build())
+            .build();
 
         assertEquals(original, same);
+
+        RoleBinding differentByMetadata = RoleBinding.builder()
+            .metadata(ObjectMeta.builder().name("rb1").cluster("cluster").build())
+            .spec(RoleBinding.RoleBindingSpec.builder()
+                .role(RoleBinding.Role.builder()
+                    .resourceTypes(List.of("res1", "res2"))
+                    .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
+                    .build())
+                .subject(RoleBinding.Subject.builder()
+                    .subjectName("subject1")
+                    .subjectType(RoleBinding.SubjectType.GROUP)
+                    .build())
+                .build())
+            .build();
+
+        assertNotEquals(original, differentByMetadata);
+
+        RoleBinding differentByRole = RoleBinding.builder()
+            .metadata(ObjectMeta.builder().name("rb1").build())
+            .spec(RoleBinding.RoleBindingSpec.builder()
+                .role(RoleBinding.Role.builder().build())
+                .subject(RoleBinding.Subject.builder()
+                    .subjectName("subject1")
+                    .subjectType(RoleBinding.SubjectType.GROUP)
+                    .build())
+                .build())
+            .build();
+
+        assertNotEquals(original, differentByRole);
+
+        RoleBinding differentBySubject = RoleBinding.builder()
+            .metadata(ObjectMeta.builder().name("rb1").build())
+            .spec(RoleBinding.RoleBindingSpec.builder()
+                .role(RoleBinding.Role.builder()
+                    .resourceTypes(List.of("res1", "res2"))
+                    .verbs(List.of(RoleBinding.Verb.GET, RoleBinding.Verb.POST))
+                    .build())
+                .subject(RoleBinding.Subject.builder().build())
+                .build())
+            .build();
+
+        assertNotEquals(original, differentBySubject);
     }
 }
diff --git a/src/test/java/com/michelin/ns4kafka/models/SchemaTest.java b/src/test/java/com/michelin/ns4kafka/models/SchemaTest.java
index 38e06223..3ca52dfe 100644
--- a/src/test/java/com/michelin/ns4kafka/models/SchemaTest.java
+++ b/src/test/java/com/michelin/ns4kafka/models/SchemaTest.java
@@ -1,92 +1,129 @@
 package com.michelin.ns4kafka.models;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
 import com.michelin.ns4kafka.models.schema.Schema;
 import com.michelin.ns4kafka.models.schema.SchemaList;
 import org.junit.jupiter.api.Test;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotEquals;
-
 class SchemaTest {
     @Test
     void testEquals() {
         Schema original = Schema.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("prefix.schema-one")
-                        .build())
-                .spec(Schema.SchemaSpec.builder()
-                        .compatibility(Schema.Compatibility.BACKWARD)
person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-one") + .build()) + .spec(Schema.SchemaSpec.builder() + .compatibility(Schema.Compatibility.BACKWARD) + .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\"," + + "\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":" + + "[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"First name of the person\"}," + + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null," + + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":" + + "[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}]," + + "\"default\":null,\"doc\":\"Date of birth of the person\"}]}") + .build()) + .build(); Schema same = Schema.builder() - .metadata(ObjectMeta.builder() - .name("prefix.schema-one") - .build()) - .spec(Schema.SchemaSpec.builder() - .compatibility(Schema.Compatibility.BACKWARD) - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-one") + .build()) + .spec(Schema.SchemaSpec.builder() + .compatibility(Schema.Compatibility.BACKWARD) + .schema( + "{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\"," + + "\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":" + + "[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":" + + "[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"}," + + "{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\"," + + "\"logicalType\":\"timestamp-millis\"}],\"default\":null," + + "\"doc\":\"Date of birth of the person\"}]}") + .build()) + .build(); + + assertEquals(original, same); Schema different = Schema.builder() - .metadata(ObjectMeta.builder() - .name("prefix.schema-one") - .build()) - .spec(Schema.SchemaSpec.builder() - .compatibility(Schema.Compatibility.BACKWARD) - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-one") + .build()) + .spec(Schema.SchemaSpec.builder() + .compatibility(Schema.Compatibility.BACKWARD) + .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\"" + + ",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":" + + "[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"First name of the person\"}," + + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"Last name of the person\"}]}") 
+ .build()) + .build(); + + assertNotEquals(original, different); Schema differentByCompat = Schema.builder() - .metadata(ObjectMeta.builder() - .name("prefix.schema-one") - .build()) - .spec(Schema.SchemaSpec.builder() - .compatibility(Schema.Compatibility.FORWARD) - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-one") + .build()) + .spec(Schema.SchemaSpec.builder() + .compatibility(Schema.Compatibility.FORWARD) + .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\"," + + "\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":" + + "[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null," + + "\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":" + + "[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"}," + + "{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\"," + + "\"logicalType\":\"timestamp-millis\"}],\"default\":null," + + "\"doc\":\"Date of birth of the person\"}]}") + .build()) + .build(); + + assertNotEquals(original, differentByCompat); Schema differentByMetadata = Schema.builder() - .metadata(ObjectMeta.builder() - .name("prefix.schema-two") - .build()) - .spec(Schema.SchemaSpec.builder() - .compatibility(Schema.Compatibility.FORWARD) - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-two") + .build()) + .spec(Schema.SchemaSpec.builder() + .compatibility(Schema.Compatibility.FORWARD) + .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\"," + + "\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":" + + "[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"First name of the person\"}," + + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null," + + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\"," + + "\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}]," + + "\"default\":null,\"doc\":\"Date of birth of the person\"}]}") + .build()) + .build(); - assertEquals(original,same); - assertNotEquals(original, different); - assertNotEquals(original, differentByCompat); assertNotEquals(original, differentByMetadata); } @Test void testSchemaListEquals() { SchemaList original = SchemaList.builder() - .metadata(ObjectMeta.builder() - .name("prefix.schema-one") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-one") + .build()) + .build(); SchemaList same = SchemaList.builder() - 
.metadata(ObjectMeta.builder() - .name("prefix.schema-one") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-one") + .build()) + .build(); SchemaList different = SchemaList.builder() - .metadata(ObjectMeta.builder() - .name("prefix.schema-two") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-two") + .build()) + .build(); - assertEquals(original,same); + assertEquals(original, same); assertNotEquals(original, different); } } diff --git a/src/test/java/com/michelin/ns4kafka/models/StreamTest.java b/src/test/java/com/michelin/ns4kafka/models/StreamTest.java index 7a93c5d5..a46cf668 100644 --- a/src/test/java/com/michelin/ns4kafka/models/StreamTest.java +++ b/src/test/java/com/michelin/ns4kafka/models/StreamTest.java @@ -1,26 +1,26 @@ package com.michelin.ns4kafka.models; -import org.junit.jupiter.api.Test; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import org.junit.jupiter.api.Test; + class StreamTest { @Test void testEquals() { KafkaStream original = KafkaStream.builder() - .metadata(ObjectMeta.builder().name("stream1").build()) - .build(); + .metadata(ObjectMeta.builder().name("stream1").build()) + .build(); KafkaStream same = KafkaStream.builder() - .metadata(ObjectMeta.builder().name("stream1").build()) - .build(); + .metadata(ObjectMeta.builder().name("stream1").build()) + .build(); KafkaStream different = KafkaStream.builder() - .metadata(ObjectMeta.builder().name("stream2").build()) - .build(); + .metadata(ObjectMeta.builder().name("stream2").build()) + .build(); - assertEquals(original,same); + assertEquals(original, same); assertNotEquals(original, different); } } diff --git a/src/test/java/com/michelin/ns4kafka/models/TopicTest.java b/src/test/java/com/michelin/ns4kafka/models/TopicTest.java index 81cef578..c399d8eb 100644 --- a/src/test/java/com/michelin/ns4kafka/models/TopicTest.java +++ b/src/test/java/com/michelin/ns4kafka/models/TopicTest.java @@ -1,85 +1,87 @@ package com.michelin.ns4kafka.models; -import org.junit.jupiter.api.Test; - -import java.util.Map; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import java.util.Map; +import org.junit.jupiter.api.Test; + class TopicTest { @Test - void testEquals(){ + void testEquals() { Topic original = Topic.builder() - .metadata(ObjectMeta.builder().name("topic1").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("k1","v1", - "k2", "v2")) - .build()) - .status(Topic.TopicStatus.ofPending()) - .build(); + .metadata(ObjectMeta.builder().name("topic1").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("k1", "v1", + "k2", "v2")) + .build()) + .status(Topic.TopicStatus.ofPending()) + .build(); Topic same = Topic.builder() - .metadata(ObjectMeta.builder().name("topic1").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("k1","v1", - "k2", "v2")) - .build()) - .status(Topic.TopicStatus.ofSuccess("Created !")) - .build(); + .metadata(ObjectMeta.builder().name("topic1").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("k1", "v1", + "k2", "v2")) + .build()) + .status(Topic.TopicStatus.ofSuccess("Created !")) + .build(); + + assertEquals(original, same); Topic differentByMetadata = Topic.builder() - 
.metadata(ObjectMeta.builder().name("topic2").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("k1","v1", - "k2", "v2")) - .build()) - .status(Topic.TopicStatus.ofPending()) - .build(); + .metadata(ObjectMeta.builder().name("topic2").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("k1", "v1", + "k2", "v2")) + .build()) + .status(Topic.TopicStatus.ofPending()) + .build(); + + assertNotEquals(original, differentByMetadata); Topic differentByReplicationFactor = Topic.builder() - .metadata(ObjectMeta.builder().name("topic2").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(99) - .partitions(3) - .configs(Map.of("k1","v1", - "k2", "v2")) - .build()) - .status(Topic.TopicStatus.ofPending()) - .build(); + .metadata(ObjectMeta.builder().name("topic2").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(99) + .partitions(3) + .configs(Map.of("k1", "v1", + "k2", "v2")) + .build()) + .status(Topic.TopicStatus.ofPending()) + .build(); + + assertNotEquals(original, differentByReplicationFactor); Topic differentByPartitions = Topic.builder() - .metadata(ObjectMeta.builder().name("topic2").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(99) - .configs(Map.of("k1","v1", - "k2", "v2")) - .build()) - .status(Topic.TopicStatus.ofPending()) - .build(); + .metadata(ObjectMeta.builder().name("topic2").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(99) + .configs(Map.of("k1", "v1", + "k2", "v2")) + .build()) + .status(Topic.TopicStatus.ofPending()) + .build(); - Topic differentByConfigs = Topic.builder() - .metadata(ObjectMeta.builder().name("topic2").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("k1","v1")) - .build()) - .status(Topic.TopicStatus.ofPending()) - .build(); + assertNotEquals(original, differentByPartitions); - assertEquals(original,same); + Topic differentByConfigs = Topic.builder() + .metadata(ObjectMeta.builder().name("topic2").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("k1", "v1")) + .build()) + .status(Topic.TopicStatus.ofPending()) + .build(); - assertNotEquals(original, differentByMetadata); - assertNotEquals(original, differentByReplicationFactor); - assertNotEquals(original, differentByPartitions); assertNotEquals(original, differentByConfigs); } } diff --git a/src/test/java/com/michelin/ns4kafka/models/TopicValidatorTest.java b/src/test/java/com/michelin/ns4kafka/models/TopicValidatorTest.java index 93a1e317..1f6f43c5 100644 --- a/src/test/java/com/michelin/ns4kafka/models/TopicValidatorTest.java +++ b/src/test/java/com/michelin/ns4kafka/models/TopicValidatorTest.java @@ -1,100 +1,104 @@ package com.michelin.ns4kafka.models; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertLinesMatch; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + import com.michelin.ns4kafka.validation.ResourceValidator; import com.michelin.ns4kafka.validation.TopicValidator; -import org.junit.jupiter.api.Test; - import java.util.List; import java.util.Map; - -import static org.junit.jupiter.api.Assertions.*; +import org.junit.jupiter.api.Test; class TopicValidatorTest { @Test void testEquals() { TopicValidator original = TopicValidator.builder() - 
.validationConstraints( - Map.of("replication.factor", ResourceValidator.Range.between(3, 3), - "partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - "min.insync.replicas", ResourceValidator.Range.between(2, 2), - "retention.ms", ResourceValidator.Range.between(60000, 604800000))) - .build(); + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(3, 3), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + "retention.ms", ResourceValidator.Range.between(60000, 604800000))) + .build(); TopicValidator same = TopicValidator.builder() - .validationConstraints( - Map.of("replication.factor", ResourceValidator.Range.between(3, 3), - "partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - "min.insync.replicas", ResourceValidator.Range.between(2, 2), - "retention.ms", ResourceValidator.Range.between(60000, 604800000))) - .build(); + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(3, 3), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + "retention.ms", ResourceValidator.Range.between(60000, 604800000))) + .build(); TopicValidator sameReordered = TopicValidator.builder() - .validationConstraints( - Map.of("partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - "min.insync.replicas", ResourceValidator.Range.between(2, 2), - // move from position 1 - "replication.factor", ResourceValidator.Range.between(3, 3), - "retention.ms", ResourceValidator.Range.between(60000, 604800000))) - .build(); + .validationConstraints( + Map.of("partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + // move from position 1 + "replication.factor", ResourceValidator.Range.between(3, 3), + "retention.ms", ResourceValidator.Range.between(60000, 604800000))) + .build(); TopicValidator differentByKey = TopicValidator.builder() - .validationConstraints( - Map.of("DIFFERENT_replication.factor", ResourceValidator.Range.between(3, 3), - "partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - "min.insync.replicas", ResourceValidator.Range.between(2, 2), - "retention.ms", ResourceValidator.Range.between(60000, 604800000))) - .build(); - - TopicValidator differentByVal = TopicValidator.builder() - .validationConstraints( - Map.of("replication.factor", ResourceValidator.Range.between(3, 99999999), - "partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - "min.insync.replicas", ResourceValidator.Range.between(2, 2), - "retention.ms", ResourceValidator.Range.between(60000, 604800000))) - .build(); - - TopicValidator differentBySize = TopicValidator.builder() - .validationConstraints( - Map.of("replication.factor", ResourceValidator.Range.between(3, 3), - "partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - 
"min.insync.replicas", ResourceValidator.Range.between(2, 2))) - .build(); + .validationConstraints( + Map.of("DIFFERENT_replication.factor", ResourceValidator.Range.between(3, 3), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + "retention.ms", ResourceValidator.Range.between(60000, 604800000))) + .build(); assertEquals(original, same); assertEquals(original, sameReordered); assertNotEquals(original, differentByKey); + + TopicValidator differentByVal = TopicValidator.builder() + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(3, 99999999), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + "retention.ms", ResourceValidator.Range.between(60000, 604800000))) + .build(); + assertNotEquals(original, differentByVal); + + TopicValidator differentBySize = TopicValidator.builder() + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(3, 3), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2))) + .build(); + assertNotEquals(original, differentBySize); } @Test void testEnsureValidGlobal() { TopicValidator topicValidator = TopicValidator.builder() - .validationConstraints( - Map.of("replication.factor", ResourceValidator.Range.between(3, 3), - "partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - "min.insync.replicas", ResourceValidator.Range.between(2, 2), - "retention.ms", ResourceValidator.Range.between(60000, 604800000))) - .build(); + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(3, 3), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + "retention.ms", ResourceValidator.Range.between(60000, 604800000))) + .build(); Topic success = Topic.builder() - .metadata(ObjectMeta.builder().name("valid_name").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("valid_name").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); List actual = topicValidator.validate(success); assertTrue(actual.isEmpty()); @@ -103,50 +107,50 @@ void testEnsureValidGlobal() { @Test void testEnsureValidName() { TopicValidator nameValidator = TopicValidator.builder() - .validationConstraints(Map.of()) - .build(); + .validationConstraints(Map.of()) + .build(); Topic invalidTopic; List validationErrors; invalidTopic = Topic.builder() - .metadata(ObjectMeta.builder().name("").build()) - .spec(Topic.TopicSpec.builder().build()) - .build(); + .metadata(ObjectMeta.builder().name("").build()) + .spec(Topic.TopicSpec.builder().build()) + .build(); validationErrors = nameValidator.validate(invalidTopic); assertEquals(2, 
validationErrors.size()); assertLinesMatch( - List.of(".*Value must not be empty.*",".*Value must only contain.*"), - validationErrors); + List.of(".*Value must not be empty.*", ".*Value must only contain.*"), + validationErrors); invalidTopic = Topic.builder() - .metadata(ObjectMeta.builder().name(".").build()) - .spec(Topic.TopicSpec.builder().build()).build(); + .metadata(ObjectMeta.builder().name(".").build()) + .spec(Topic.TopicSpec.builder().build()).build(); validationErrors = nameValidator.validate(invalidTopic); assertEquals(1, validationErrors.size()); invalidTopic = Topic.builder() - .metadata(ObjectMeta.builder().name("..").build()) - .spec(Topic.TopicSpec.builder().build()).build(); + .metadata(ObjectMeta.builder().name("..").build()) + .spec(Topic.TopicSpec.builder().build()).build(); validationErrors = nameValidator.validate(invalidTopic); assertEquals(1, validationErrors.size()); invalidTopic = Topic.builder() - .metadata(ObjectMeta.builder().name("A".repeat(260)).build()) - .spec(Topic.TopicSpec.builder().build()).build(); + .metadata(ObjectMeta.builder().name("A".repeat(260)).build()) + .spec(Topic.TopicSpec.builder().build()).build(); validationErrors = nameValidator.validate(invalidTopic); assertEquals(1, validationErrors.size()); invalidTopic = Topic.builder() - .metadata(ObjectMeta.builder().name("A B").build()) - .spec(Topic.TopicSpec.builder().build()).build(); + .metadata(ObjectMeta.builder().name("A B").build()) + .spec(Topic.TopicSpec.builder().build()).build(); validationErrors = nameValidator.validate(invalidTopic); assertEquals(1, validationErrors.size()); invalidTopic = Topic.builder() - .metadata(ObjectMeta.builder().name("topicname actual = topicValidator.validate(topic); assertTrue(actual.isEmpty()); @@ -175,15 +179,15 @@ void shouldValidateWithNoValidationConstraint() { @Test void shouldValidateWithNoValidationConstraintAndNoConfig() { TopicValidator topicValidator = TopicValidator.builder() - .build(); + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("validName").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("validName").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .build()) + .build(); List actual = topicValidator.validate(topic); assertTrue(actual.isEmpty()); @@ -192,21 +196,21 @@ void shouldValidateWithNoValidationConstraintAndNoConfig() { @Test void shouldValidateWithNoConfig() { TopicValidator topicValidator = TopicValidator.builder() - .validationConstraints( - Map.of("replication.factor", ResourceValidator.Range.between(3, 3), - "partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - "min.insync.replicas", ResourceValidator.Range.between(2, 2), - "retention.ms", ResourceValidator.Range.between(60000, 604800000))) - .build(); + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(3, 3), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + "retention.ms", ResourceValidator.Range.between(60000, 604800000))) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("validName").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .build()) - .build(); + 
.metadata(ObjectMeta.builder().name("validName").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .build()) + .build(); List actual = topicValidator.validate(topic); assertEquals(3, actual.size()); @@ -218,25 +222,25 @@ void shouldValidateWithNoConfig() { @Test void shouldNotValidateBecauseConfigWithoutConstraint() { TopicValidator topicValidator = TopicValidator.builder() - .validationConstraints( - Map.of("replication.factor", ResourceValidator.Range.between(3, 3), - "partitions", ResourceValidator.Range.between(3, 6), - "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), - "min.insync.replicas", ResourceValidator.Range.between(2, 2), - "retention.ms", ResourceValidator.Range.between(60000, 604800000))) - .build(); + .validationConstraints( + Map.of("replication.factor", ResourceValidator.Range.between(3, 3), + "partitions", ResourceValidator.Range.between(3, 6), + "cleanup.policy", ResourceValidator.ValidList.in("delete", "compact"), + "min.insync.replicas", ResourceValidator.Range.between(2, 2), + "retention.ms", ResourceValidator.Range.between(60000, 604800000))) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("validName").build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy", "delete", - "min.insync.replicas", "2", - "retention.ms", "60000", - "retention.bytes", "50")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("validName").build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000", + "retention.bytes", "50")) + .build()) + .build(); List actual = topicValidator.validate(topic); assertEquals(1, actual.size()); diff --git a/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationProviderTest.java b/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationProviderTest.java index 24672ab2..02ad4226 100644 --- a/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationProviderTest.java +++ b/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationProviderTest.java @@ -1,8 +1,13 @@ package com.michelin.ns4kafka.security; -import com.michelin.ns4kafka.config.SecurityConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertIterableEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.RoleBinding; +import com.michelin.ns4kafka.properties.SecurityProperties; import com.michelin.ns4kafka.security.gitlab.GitlabAuthenticationProvider; import com.michelin.ns4kafka.security.gitlab.GitlabAuthenticationService; import com.michelin.ns4kafka.services.RoleBindingService; @@ -12,6 +17,7 @@ import io.micronaut.security.authentication.AuthenticationRequest; import io.micronaut.security.authentication.AuthenticationResponse; import io.micronaut.security.authentication.UsernamePasswordCredentials; +import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; @@ -22,11 +28,6 @@ import reactor.core.publisher.Mono; import reactor.test.StepVerifier; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) 
class GitlabAuthenticationProviderTest { @Mock @@ -39,125 +40,125 @@ class GitlabAuthenticationProviderTest { RoleBindingService roleBindingService; @Mock - SecurityConfig securityConfig; + SecurityProperties securityProperties; @InjectMocks GitlabAuthenticationProvider gitlabAuthenticationProvider; - /** - * Assert the user authentication is successful - */ @Test void authenticationSuccess() { - AuthenticationRequest authenticationRequest = new UsernamePasswordCredentials("username","53cu23d_70k3n"); + AuthenticationRequest authenticationRequest = + new UsernamePasswordCredentials("username", "53cu23d_70k3n"); - List groups = List.of("group-1","group-2"); + List groups = List.of("group-1", "group-2"); RoleBinding roleBinding = RoleBinding.builder() - .metadata(ObjectMeta.builder() - .name("namespace-rb1") - .cluster("local") - .build()) - .spec(RoleBinding.RoleBindingSpec.builder() - .subject(RoleBinding.Subject.builder() - .subjectName("group-1") - .subjectType(RoleBinding.SubjectType.GROUP) - .build()) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace-rb1") + .cluster("local") + .build()) + .spec(RoleBinding.RoleBindingSpec.builder() + .subject(RoleBinding.Subject.builder() + .subjectName("group-1") + .subjectType(RoleBinding.SubjectType.GROUP) + .build()) + .build()) + .build(); when(gitlabAuthenticationService.findUsername(authenticationRequest.getSecret())) - .thenReturn(Mono.just("email")); + .thenReturn(Mono.just("email")); when(gitlabAuthenticationService.findAllGroups(authenticationRequest.getSecret())) - .thenReturn(Flux.fromIterable(groups)); + .thenReturn(Flux.fromIterable(groups)); when(roleBindingService.listByGroups(groups)) - .thenReturn(List.of(roleBinding)); + .thenReturn(List.of(roleBinding)); when(resourceBasedSecurityRule.computeRolesFromGroups(groups)) - .thenReturn(List.of()); + .thenReturn(List.of()); - Publisher authenticationResponsePublisher = gitlabAuthenticationProvider.authenticate(null, authenticationRequest); + Publisher authenticationResponsePublisher = + gitlabAuthenticationProvider.authenticate(null, authenticationRequest); StepVerifier.create(authenticationResponsePublisher) - .consumeNextWith(response -> { - assertTrue(response.isAuthenticated()); - assertTrue(response.getAuthentication().isPresent()); - assertEquals("email", response.getAuthentication().get().getName()); - assertIterableEquals(groups, (List) response.getAuthentication().get().getAttributes().get( "groups")); - assertIterableEquals(List.of(), response.getAuthentication().get().getRoles(), "User has no custom roles"); - }) - .verifyComplete(); + .consumeNextWith(response -> { + assertTrue(response.isAuthenticated()); + assertTrue(response.getAuthentication().isPresent()); + assertEquals("email", response.getAuthentication().get().getName()); + assertIterableEquals(groups, + (List) response.getAuthentication().get().getAttributes().get("groups")); + assertIterableEquals(List.of(), response.getAuthentication().get().getRoles(), + "User has no custom roles"); + }) + .verifyComplete(); } - /** - * Assert the admin authentication is successful - */ @Test void authenticationSuccessAdmin() { - AuthenticationRequest authenticationRequest = new UsernamePasswordCredentials("admin","53cu23d_70k3n"); + AuthenticationRequest authenticationRequest = + new UsernamePasswordCredentials("admin", "53cu23d_70k3n"); - List groups = List.of("group-1","group-2","group-admin"); + List groups = List.of("group-1", "group-2", "group-admin"); 
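The stubs that follow pin down how admin detection works: the provider compares the user's GitLab groups against the single admin group configured in SecurityProperties, and computeRolesFromGroups() yields the IS_ADMIN role on a match (the computeRolesNoAdmin/computeRolesAdmin tests further down confirm this). A minimal sketch of that logic, with field and method shapes assumed from the mocks in this test rather than copied from the production source:

```java
import com.michelin.ns4kafka.properties.SecurityProperties;
import com.michelin.ns4kafka.security.ResourceBasedSecurityRule;
import java.util.List;

// Sketch only: structure is an assumption inferred from the stubs in this test.
class RoleComputationSketch {
    private final SecurityProperties securityProperties;

    RoleComputationSketch(SecurityProperties securityProperties) {
        this.securityProperties = securityProperties;
    }

    // IS_ADMIN is granted exactly when the user's GitLab groups contain the
    // configured admin group ("group-admin" in the stubs below).
    List<String> computeRolesFromGroups(List<String> groups) {
        return groups.contains(securityProperties.getAdminGroup())
            ? List.of(ResourceBasedSecurityRule.IS_ADMIN)
            : List.of();
    }
}
```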
         when(gitlabAuthenticationService.findUsername(authenticationRequest.getSecret()))
-                .thenReturn(Mono.just("email"));
+            .thenReturn(Mono.just("email"));
         when(gitlabAuthenticationService.findAllGroups(authenticationRequest.getSecret()))
-                .thenReturn(Flux.fromIterable(groups));
+            .thenReturn(Flux.fromIterable(groups));
         when(roleBindingService.listByGroups(groups))
-                .thenReturn(List.of());
-        when(securityConfig.getAdminGroup())
-                .thenReturn("group-admin");
+            .thenReturn(List.of());
+        when(securityProperties.getAdminGroup())
+            .thenReturn("group-admin");
         when(resourceBasedSecurityRule.computeRolesFromGroups(groups))
-                .thenReturn(List.of(ResourceBasedSecurityRule.IS_ADMIN));
+            .thenReturn(List.of(ResourceBasedSecurityRule.IS_ADMIN));
 
-        Publisher<AuthenticationResponse> authenticationResponsePublisher = gitlabAuthenticationProvider.authenticate(null, authenticationRequest);
+        Publisher<AuthenticationResponse> authenticationResponsePublisher =
+            gitlabAuthenticationProvider.authenticate(null, authenticationRequest);
 
         StepVerifier.create(authenticationResponsePublisher)
-                .consumeNextWith(response -> {
-                    assertTrue(response.isAuthenticated());
-                    assertTrue(response.getAuthentication().isPresent());
-                    assertEquals("email", response.getAuthentication().get().getName());
-                    assertIterableEquals(groups, (List<String>) response.getAuthentication().get().getAttributes().get( "groups"));
-                    assertIterableEquals(List.of(ResourceBasedSecurityRule.IS_ADMIN), response.getAuthentication().get().getRoles(), "User has custom roles");
-                })
-                .verifyComplete();
+            .consumeNextWith(response -> {
+                assertTrue(response.isAuthenticated());
+                assertTrue(response.getAuthentication().isPresent());
+                assertEquals("email", response.getAuthentication().get().getName());
+                assertIterableEquals(groups,
+                    (List<String>) response.getAuthentication().get().getAttributes().get("groups"));
+                assertIterableEquals(List.of(ResourceBasedSecurityRule.IS_ADMIN),
+                    response.getAuthentication().get().getRoles(), "User has custom roles");
+            })
+            .verifyComplete();
     }
 
-    /**
-     * Assert the authentication fails when GitLab responds HTTP 403
-     */
     @Test
     void authenticationFailure() {
-        AuthenticationRequest authenticationRequest = new UsernamePasswordCredentials("admin","f4k3_70k3n");
+        AuthenticationRequest authenticationRequest =
+            new UsernamePasswordCredentials("admin", "f4k3_70k3n");
 
         when(gitlabAuthenticationService.findUsername(authenticationRequest.getSecret()))
-                .thenReturn(Mono.error(new HttpClientResponseException("403 Unauthorized", HttpResponse.unauthorized())));
+            .thenReturn(Mono.error(new HttpClientResponseException("403 Unauthorized", HttpResponse.unauthorized())));
 
-        Publisher<AuthenticationResponse> authenticationResponsePublisher = gitlabAuthenticationProvider.authenticate(null, authenticationRequest);
+        Publisher<AuthenticationResponse> authenticationResponsePublisher =
+            gitlabAuthenticationProvider.authenticate(null, authenticationRequest);
 
         StepVerifier.create(authenticationResponsePublisher)
-                .consumeErrorWith(error -> assertEquals(AuthenticationException.class, error.getClass()))
-                .verify();
+            .consumeErrorWith(error -> assertEquals(AuthenticationException.class, error.getClass()))
+            .verify();
     }
 
-    /**
-     * Assert the authentication fails when GitLab responds HTTP 403
-     */
     @Test
     void authenticationFailureGroupsNotFound() {
-        AuthenticationRequest authenticationRequest = new UsernamePasswordCredentials("admin","f4k3_70k3n");
+        AuthenticationRequest authenticationRequest =
+            new UsernamePasswordCredentials("admin", "f4k3_70k3n");
 
-        List<String> groups = List.of("group-1","group-2");
+        List<String> groups = List.of("group-1", "group-2");
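All of the reactive assertions in these tests go through Reactor's StepVerifier, which subscribes to a publisher and checks each signal in order. A standalone illustration of the two patterns used above and below, normal completion versus an expected error; only reactor-core and reactor-test are needed:

```java
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;

class StepVerifierPatterns {
    public static void main(String[] args) {
        // Happy path: assert each emission in order, then require normal completion.
        StepVerifier.create(Flux.just("group-1", "group-2"))
            .expectNext("group-1")
            .expectNext("group-2")
            .verifyComplete();

        // Failure path, as in authenticationFailure above: the subscription must
        // terminate with an error of the expected type instead of completing.
        StepVerifier.create(Mono.error(new IllegalStateException("boom")))
            .expectError(IllegalStateException.class)
            .verify();
    }
}
```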
when(gitlabAuthenticationService.findUsername(authenticationRequest.getSecret())) - .thenReturn(Mono.just("email")); + .thenReturn(Mono.just("email")); when(gitlabAuthenticationService.findAllGroups(authenticationRequest.getSecret())) - .thenReturn(Flux.fromIterable(groups)); + .thenReturn(Flux.fromIterable(groups)); when(roleBindingService.listByGroups(groups)) - .thenReturn(List.of()); - when(securityConfig.getAdminGroup()) - .thenReturn("group-admin"); + .thenReturn(List.of()); + when(securityProperties.getAdminGroup()) + .thenReturn("group-admin"); - Publisher authenticationResponsePublisher = gitlabAuthenticationProvider.authenticate(null, authenticationRequest); + Publisher authenticationResponsePublisher = + gitlabAuthenticationProvider.authenticate(null, authenticationRequest); StepVerifier.create(authenticationResponsePublisher) - .consumeErrorWith(error -> assertEquals(AuthenticationException.class, error.getClass())) - .verify(); + .consumeErrorWith(error -> assertEquals(AuthenticationException.class, error.getClass())) + .verify(); } } diff --git a/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationServiceTest.java b/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationServiceTest.java index f59ded7d..a3abd0b4 100644 --- a/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/security/GitlabAuthenticationServiceTest.java @@ -1,9 +1,14 @@ package com.michelin.ns4kafka.security; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.security.gitlab.GitlabApiClient; import com.michelin.ns4kafka.security.gitlab.GitlabAuthenticationService; import io.micronaut.http.HttpResponse; import io.micronaut.http.MutableHttpResponse; +import java.util.List; +import java.util.Map; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; @@ -14,12 +19,6 @@ import reactor.core.publisher.Mono; import reactor.test.StepVerifier; -import java.util.List; -import java.util.Map; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class GitlabAuthenticationServiceTest { @Mock @@ -29,28 +28,28 @@ class GitlabAuthenticationServiceTest { GitlabAuthenticationService gitlabAuthenticationService; @Test - void findUserSuccess(){ + void findUserSuccess() { String token = "v4l1d_70k3n"; when(gitlabApiClient.findUser(token)) - .thenReturn(Mono.just(Map.of("user","test", "email", "user@mail.com"))); + .thenReturn(Mono.just(Map.of("user", "test", "email", "user@mail.com"))); Mono authenticationResponsePublisher = gitlabAuthenticationService.findUsername(token); StepVerifier.create(authenticationResponsePublisher) - .consumeNextWith(response -> assertEquals("user@mail.com", response)) - .verifyComplete(); + .consumeNextWith(response -> assertEquals("user@mail.com", response)) + .verifyComplete(); } @Test - void findGroupsOnePage(){ + void findGroupsOnePage() { String token = "v4l1d_70k3n"; MutableHttpResponse>> pageOneResponse = HttpResponse - .ok(List.of( - Map.of("full_path", "group1", "unusedKey", "unusedVal"), - Map.of("full_path", "group2", "unusedKey", "unusedVal"))) - .header("X-Total-Pages","1"); + .ok(List.of( + Map.of("full_path", "group1", "unusedKey", "unusedVal"), + Map.of("full_path", "group2", "unusedKey", "unusedVal"))) + .header("X-Total-Pages", "1"); - 
when(gitlabApiClient.getGroupsPage(token,1)).thenReturn(Flux.just(pageOneResponse)); + when(gitlabApiClient.getGroupsPage(token, 1)).thenReturn(Flux.just(pageOneResponse)); Flux authenticationResponsePublisher = gitlabAuthenticationService.findAllGroups(token); @@ -61,41 +60,41 @@ void findGroupsOnePage(){ } @Test - void findGroupsThreePages(){ + void findGroupsThreePages() { String token = "v4l1d_70k3n"; MutableHttpResponse>> pageOneResponse = HttpResponse - .ok(List.of( - Map.of("full_path", "group1", "unusedKey", "unusedVal"), - Map.of("full_path", "group2", "unusedKey", "unusedVal"))) - .header("X-Next-Page","2") - .header("X-Total-Pages","3"); + .ok(List.of( + Map.of("full_path", "group1", "unusedKey", "unusedVal"), + Map.of("full_path", "group2", "unusedKey", "unusedVal"))) + .header("X-Next-Page", "2") + .header("X-Total-Pages", "3"); MutableHttpResponse>> pageTwoResponse = HttpResponse - .ok(List.of( - Map.of("full_path", "group3", "unusedKey", "unusedVal"), - Map.of("full_path", "group4", "unusedKey", "unusedVal"))) - .header("X-Next-Page","3") - .header("X-Total-Pages","3"); + .ok(List.of( + Map.of("full_path", "group3", "unusedKey", "unusedVal"), + Map.of("full_path", "group4", "unusedKey", "unusedVal"))) + .header("X-Next-Page", "3") + .header("X-Total-Pages", "3"); MutableHttpResponse>> pageThreeResponse = HttpResponse - .ok(List.of( - Map.of("full_path", "group5", "unusedKey", "unusedVal"), - Map.of("full_path", "group6", "unusedKey", "unusedVal"))) - .header("X-Total-Pages","3"); + .ok(List.of( + Map.of("full_path", "group5", "unusedKey", "unusedVal"), + Map.of("full_path", "group6", "unusedKey", "unusedVal"))) + .header("X-Total-Pages", "3"); - when(gitlabApiClient.getGroupsPage(token,1)).thenReturn(Flux.just(pageOneResponse)); - when(gitlabApiClient.getGroupsPage(token,2)).thenReturn(Flux.just(pageTwoResponse)); - when(gitlabApiClient.getGroupsPage(token,3)).thenReturn(Flux.just(pageThreeResponse)); + when(gitlabApiClient.getGroupsPage(token, 1)).thenReturn(Flux.just(pageOneResponse)); + when(gitlabApiClient.getGroupsPage(token, 2)).thenReturn(Flux.just(pageTwoResponse)); + when(gitlabApiClient.getGroupsPage(token, 3)).thenReturn(Flux.just(pageThreeResponse)); Publisher authenticationResponsePublisher = gitlabAuthenticationService.findAllGroups(token); StepVerifier.create(authenticationResponsePublisher) - .consumeNextWith(response -> assertEquals("group1", response)) - .consumeNextWith(response -> assertEquals("group2", response)) - .consumeNextWith(response -> assertEquals("group3", response)) - .consumeNextWith(response -> assertEquals("group4", response)) - .consumeNextWith(response -> assertEquals("group5", response)) - .consumeNextWith(response -> assertEquals("group6", response)) - .verifyComplete(); + .consumeNextWith(response -> assertEquals("group1", response)) + .consumeNextWith(response -> assertEquals("group2", response)) + .consumeNextWith(response -> assertEquals("group3", response)) + .consumeNextWith(response -> assertEquals("group4", response)) + .consumeNextWith(response -> assertEquals("group5", response)) + .consumeNextWith(response -> assertEquals("group6", response)) + .verifyComplete(); } } diff --git a/src/test/java/com/michelin/ns4kafka/security/LocalUserAuthenticationProviderTest.java b/src/test/java/com/michelin/ns4kafka/security/LocalUserAuthenticationProviderTest.java index 95ed1ff0..016d8dca 100644 --- a/src/test/java/com/michelin/ns4kafka/security/LocalUserAuthenticationProviderTest.java +++ 
b/src/test/java/com/michelin/ns4kafka/security/LocalUserAuthenticationProviderTest.java @@ -1,11 +1,16 @@ package com.michelin.ns4kafka.security; -import com.michelin.ns4kafka.config.SecurityConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + +import com.michelin.ns4kafka.properties.SecurityProperties; import com.michelin.ns4kafka.security.local.LocalUser; import com.michelin.ns4kafka.security.local.LocalUserAuthenticationProvider; import io.micronaut.security.authentication.AuthenticationException; import io.micronaut.security.authentication.AuthenticationResponse; import io.micronaut.security.authentication.UsernamePasswordCredentials; +import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.ArgumentMatchers; @@ -15,16 +20,10 @@ import org.reactivestreams.Publisher; import reactor.test.StepVerifier; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class LocalUserAuthenticationProviderTest { @Mock - SecurityConfig securityConfig; + SecurityProperties securityProperties; @Mock ResourceBasedSecurityRule resourceBasedSecurityRule; @@ -36,56 +35,59 @@ class LocalUserAuthenticationProviderTest { void authenticateNoMatchUser() { UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); - when(securityConfig.getLocalUsers()) - .thenReturn(List.of()); + when(securityProperties.getLocalUsers()) + .thenReturn(List.of()); - Publisher authenticationResponsePublisher = localUserAuthenticationProvider.authenticate(null, credentials); + Publisher authenticationResponsePublisher = + localUserAuthenticationProvider.authenticate(null, credentials); StepVerifier.create(authenticationResponsePublisher) - .consumeErrorWith(error -> assertEquals(AuthenticationException.class, error.getClass())) - .verify(); + .consumeErrorWith(error -> assertEquals(AuthenticationException.class, error.getClass())) + .verify(); } @Test void authenticateMatchUserNoMatchPassword() { UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); - when(securityConfig.getLocalUsers()) - .thenReturn(List.of(LocalUser.builder() - .username("admin") - .password("invalid_sha256_signature") - .build())); + when(securityProperties.getLocalUsers()) + .thenReturn(List.of(LocalUser.builder() + .username("admin") + .password("invalid_sha256_signature") + .build())); - Publisher authenticationResponsePublisher = localUserAuthenticationProvider.authenticate(null, + Publisher authenticationResponsePublisher = + localUserAuthenticationProvider.authenticate(null, credentials); StepVerifier.create(authenticationResponsePublisher) - .consumeErrorWith(error -> assertEquals(AuthenticationException.class, error.getClass())) - .verify(); + .consumeErrorWith(error -> assertEquals(AuthenticationException.class, error.getClass())) + .verify(); } @Test void authenticateMatchUserMatchPassword() { UsernamePasswordCredentials credentials = new UsernamePasswordCredentials("admin", "admin"); - when(securityConfig.getLocalUsers()) - .thenReturn(List.of(LocalUser.builder() - .username("admin") - .password("8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918") - .groups(List.of("admin")) - .build())); + 
when(securityProperties.getLocalUsers()) + .thenReturn(List.of(LocalUser.builder() + .username("admin") + .password("8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918") + .groups(List.of("admin")) + .build())); when(resourceBasedSecurityRule.computeRolesFromGroups(ArgumentMatchers.any())) - .thenReturn(List.of()); + .thenReturn(List.of()); - Publisher authenticationResponsePublisher = localUserAuthenticationProvider.authenticate(null, credentials); + Publisher authenticationResponsePublisher = + localUserAuthenticationProvider.authenticate(null, credentials); StepVerifier.create(authenticationResponsePublisher) - .consumeNextWith(response -> { - assertTrue(response.isAuthenticated()); - assertTrue(response.getAuthentication().isPresent()); - assertEquals("admin", response.getAuthentication().get().getName()); - }) - .verifyComplete(); + .consumeNextWith(response -> { + assertTrue(response.isAuthenticated()); + assertTrue(response.getAuthentication().isPresent()); + assertEquals("admin", response.getAuthentication().get().getName()); + }) + .verifyComplete(); } } diff --git a/src/test/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRuleTest.java b/src/test/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRuleTest.java index 65bbe01f..b58164f6 100644 --- a/src/test/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRuleTest.java +++ b/src/test/java/com/michelin/ns4kafka/security/ResourceBasedSecurityRuleTest.java @@ -1,14 +1,20 @@ package com.michelin.ns4kafka.security; -import com.michelin.ns4kafka.config.SecurityConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.RoleBinding; +import com.michelin.ns4kafka.properties.SecurityProperties; import com.michelin.ns4kafka.repositories.NamespaceRepository; import com.michelin.ns4kafka.repositories.RoleBindingRepository; import io.micronaut.http.HttpRequest; import io.micronaut.security.authentication.Authentication; import io.micronaut.security.rules.SecurityRuleResult; +import java.util.List; +import java.util.Map; +import java.util.Optional; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -16,13 +22,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class ResourceBasedSecurityRuleTest { @Mock @@ -32,21 +31,21 @@ class ResourceBasedSecurityRuleTest { RoleBindingRepository roleBindingRepository; @Mock - SecurityConfig securityConfig; + SecurityProperties securityProperties; @InjectMocks ResourceBasedSecurityRule resourceBasedSecurityRule; @Test - void checkReturnsUnknownUnauthenticated(){ - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/anything"),null); + void checkReturnsUnknownUnauthenticated() { + SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/anything"), null); assertEquals(SecurityRuleResult.UNKNOWN, actual); } @Test - void checkReturnsUnknownMissingClaims(){ + void checkReturnsUnknownMissingClaims() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups); + Map claims = Map.of("sub", 
"user", "groups", groups); Authentication auth = Authentication.build("user", claims); SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/anything"), auth); @@ -54,251 +53,268 @@ void checkReturnsUnknownMissingClaims(){ } @Test - void checkReturnsUnknownInvalidResource(){ + void checkReturnsUnknownInvalidResource() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/non-namespaced/resource"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/non-namespaced/resource"), auth); assertEquals(SecurityRuleResult.UNKNOWN, actual); } @Test - void checkReturnsUnknownNoRoleBinding(){ + void checkReturnsUnknownNoRoleBinding() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); when(roleBindingRepository.findAllForGroups(groups)) - .thenReturn(List.of()); + .thenReturn(List.of()); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors"), auth); assertEquals(SecurityRuleResult.UNKNOWN, actual); } @Test - void checkReturnsUnknownInvalidNamespace(){ + void checkReturnsUnknownInvalidNamespace() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors"), auth); assertEquals(SecurityRuleResult.UNKNOWN, actual); } @Test - void checkReturnsUnknownAdminNamespaceAsNotAdmin(){ + void checkReturnsUnknownAdminNamespaceAsNotAdmin() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/admin/connectors"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/admin/connectors"), auth); assertEquals(SecurityRuleResult.UNKNOWN, actual); } @Test - void checkReturnsUnknownInvalidNamespaceAsAdmin(){ + void checkReturnsUnknownInvalidNamespaceAsAdmin() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of("isAdmin()")); + Map claims = Map.of("sub", "user", "groups", groups, "roles", 
List.of("isAdmin()")); Authentication auth = Authentication.build("user", List.of("isAdmin()"), claims); when(namespaceRepository.findByName("admin")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/admin/connectors"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/admin/connectors"), auth); assertEquals(SecurityRuleResult.UNKNOWN, actual); } @Test - void checkReturnsAllowedNamespaceAsAdmin(){ + void checkReturnsAllowedNamespaceAsAdmin() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of("isAdmin()")); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of("isAdmin()")); Authentication auth = Authentication.build("user", List.of("isAdmin()"), claims); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors"), auth); assertEquals(SecurityRuleResult.ALLOWED, actual); } @Test void checkReturnsAllowed() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); when(roleBindingRepository.findAllForGroups(groups)) - .thenReturn(List.of(RoleBinding.builder() - .metadata(ObjectMeta.builder().namespace("test") - .build()) - .spec(RoleBinding.RoleBindingSpec.builder() - .role(RoleBinding.Role.builder() - .resourceTypes(List.of("connectors")) - .verbs(List.of(RoleBinding.Verb.GET)) - .build()) - .subject(RoleBinding.Subject.builder().subjectName("group1") - .build()) - .build()) - .build())); + .thenReturn(List.of(RoleBinding.builder() + .metadata(ObjectMeta.builder().namespace("test") + .build()) + .spec(RoleBinding.RoleBindingSpec.builder() + .role(RoleBinding.Role.builder() + .resourceTypes(List.of("connectors")) + .verbs(List.of(RoleBinding.Verb.GET)) + .build()) + .subject(RoleBinding.Subject.builder().subjectName("group1") + .build()) + .build()) + .build())); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors"), auth); assertEquals(SecurityRuleResult.ALLOWED, actual); } @Test - void CheckReturnsAllowedSubresource() { + void checkReturnsAllowedSubresource() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); when(roleBindingRepository.findAllForGroups(groups)) - .thenReturn(List.of(RoleBinding.builder() - .metadata(ObjectMeta.builder().namespace("test") - .build()) - .spec(RoleBinding.RoleBindingSpec.builder() - .role(RoleBinding.Role.builder() - 
.resourceTypes(List.of("connectors/restart","topics/delete-records")) - .verbs(List.of(RoleBinding.Verb.GET)) - .build()) - .subject(RoleBinding.Subject.builder().subjectName("group1") - .build()) - .build()) - .build())); + .thenReturn(List.of(RoleBinding.builder() + .metadata(ObjectMeta.builder().namespace("test") + .build()) + .spec(RoleBinding.RoleBindingSpec.builder() + .role(RoleBinding.Role.builder() + .resourceTypes(List.of("connectors/restart", "topics/delete-records")) + .verbs(List.of(RoleBinding.Verb.GET)) + .build()) + .subject(RoleBinding.Subject.builder().subjectName("group1") + .build()) + .build()) + .build())); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors/name/restart"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors/name/restart"), + auth); assertEquals(SecurityRuleResult.ALLOWED, actual); - actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/topics/name/delete-records"), auth); + actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/topics/name/delete-records"), + auth); assertEquals(SecurityRuleResult.ALLOWED, actual); } @Test - void CheckReturnsAllowedResourceWithHyphen() { + void checkReturnsAllowedResourceWithHyphen() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); when(roleBindingRepository.findAllForGroups(groups)) - .thenReturn(List.of(RoleBinding.builder() - .metadata(ObjectMeta.builder().namespace("test") - .build()) - .spec(RoleBinding.RoleBindingSpec.builder() - .role(RoleBinding.Role.builder() - .resourceTypes(List.of("role-bindings")) - .verbs(List.of(RoleBinding.Verb.GET)) - .build()) - .subject(RoleBinding.Subject.builder().subjectName("group1") - .build()) - .build()) - .build())); + .thenReturn(List.of(RoleBinding.builder() + .metadata(ObjectMeta.builder().namespace("test") + .build()) + .spec(RoleBinding.RoleBindingSpec.builder() + .role(RoleBinding.Role.builder() + .resourceTypes(List.of("role-bindings")) + .verbs(List.of(RoleBinding.Verb.GET)) + .build()) + .subject(RoleBinding.Subject.builder().subjectName("group1") + .build()) + .build()) + .build())); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/role-bindings"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/role-bindings"), auth); assertEquals(SecurityRuleResult.ALLOWED, actual); } @Test - void CheckReturnsAllowedResourceNameWithDot() { + void checkReturnsAllowedResourceNameWithDot() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); when(roleBindingRepository.findAllForGroups(groups)) - .thenReturn(List.of(RoleBinding.builder() - 
.metadata(ObjectMeta.builder().namespace("test") - .build()) - .spec(RoleBinding.RoleBindingSpec.builder() - .role(RoleBinding.Role.builder() - .resourceTypes(List.of("topics")) - .verbs(List.of(RoleBinding.Verb.GET)) - .build()) - .subject(RoleBinding.Subject.builder().subjectName("group1") - .build()) - .build()) - .build())); + .thenReturn(List.of(RoleBinding.builder() + .metadata(ObjectMeta.builder().namespace("test") + .build()) + .spec(RoleBinding.RoleBindingSpec.builder() + .role(RoleBinding.Role.builder() + .resourceTypes(List.of("topics")) + .verbs(List.of(RoleBinding.Verb.GET)) + .build()) + .subject(RoleBinding.Subject.builder().subjectName("group1") + .build()) + .build()) + .build())); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/topics/topic.with.dots"), auth); + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/topics/topic.with.dots"), + auth); assertEquals(SecurityRuleResult.ALLOWED, actual); } @Test - void CheckReturnsUnknownSubResource(){ + void checkReturnsUnknownSubResource() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); when(roleBindingRepository.findAllForGroups(groups)) - .thenReturn(List.of(RoleBinding.builder() - .metadata(ObjectMeta.builder().namespace("test") - .build()) - .spec(RoleBinding.RoleBindingSpec.builder() - .role(RoleBinding.Role.builder() - .resourceTypes(List.of("connectors")) - .verbs(List.of(RoleBinding.Verb.GET)) - .build()) - .subject(RoleBinding.Subject.builder().subjectName("group1") - .build()) - .build()) - .build())); - - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors/name/restart"), auth); + .thenReturn(List.of(RoleBinding.builder() + .metadata(ObjectMeta.builder().namespace("test") + .build()) + .spec(RoleBinding.RoleBindingSpec.builder() + .role(RoleBinding.Role.builder() + .resourceTypes(List.of("connectors")) + .verbs(List.of(RoleBinding.Verb.GET)) + .build()) + .subject(RoleBinding.Subject.builder().subjectName("group1") + .build()) + .build()) + .build())); + + SecurityRuleResult actual = + resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors/name/restart"), + auth); assertEquals(SecurityRuleResult.UNKNOWN, actual); } @Test - void CheckReturnsUnknownSubResourceWithDot(){ + void checkReturnsUnknownSubResourceWithDot() { List groups = List.of("group1"); - Map claims = Map.of("sub","user", "groups", groups, "roles", List.of()); + Map claims = Map.of("sub", "user", "groups", groups, "roles", List.of()); Authentication auth = Authentication.build("user", claims); when(namespaceRepository.findByName("test")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); when(roleBindingRepository.findAllForGroups(groups)) - .thenReturn(List.of(RoleBinding.builder() - .metadata(ObjectMeta.builder().namespace("test") - .build()) - 
.spec(RoleBinding.RoleBindingSpec.builder() - .role(RoleBinding.Role.builder() - .resourceTypes(List.of("connectors")) - .verbs(List.of(RoleBinding.Verb.GET)) - .build()) - .subject(RoleBinding.Subject.builder().subjectName("group1") - .build()) - .build()) - .build())); - - SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity(HttpRequest.GET("/api/namespaces/test/connectors/name.with.dots/restart"), auth); + .thenReturn(List.of(RoleBinding.builder() + .metadata(ObjectMeta.builder().namespace("test") + .build()) + .spec(RoleBinding.RoleBindingSpec.builder() + .role(RoleBinding.Role.builder() + .resourceTypes(List.of("connectors")) + .verbs(List.of(RoleBinding.Verb.GET)) + .build()) + .subject(RoleBinding.Subject.builder().subjectName("group1") + .build()) + .build()) + .build())); + + SecurityRuleResult actual = resourceBasedSecurityRule.checkSecurity( + HttpRequest.GET("/api/namespaces/test/connectors/name.with.dots/restart"), auth); assertEquals(SecurityRuleResult.UNKNOWN, actual); } @Test - void ComputeRolesNoAdmin() { - when(securityConfig.getAdminGroup()) - .thenReturn("admin-group"); + void computeRolesNoAdmin() { + when(securityProperties.getAdminGroup()) + .thenReturn("admin-group"); List actual = resourceBasedSecurityRule.computeRolesFromGroups(List.of("not-admin")); Assertions.assertIterableEquals(List.of(), actual); } @Test - void ComputeRolesAdmin() { - when(securityConfig.getAdminGroup()) - .thenReturn("admin-group"); + void computeRolesAdmin() { + when(securityProperties.getAdminGroup()) + .thenReturn("admin-group"); List actual = resourceBasedSecurityRule.computeRolesFromGroups(List.of("admin-group")); Assertions.assertIterableEquals(List.of(ResourceBasedSecurityRule.IS_ADMIN), actual); diff --git a/src/test/java/com/michelin/ns4kafka/services/AccessControlEntryServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/AccessControlEntryServiceTest.java index d0f186d2..d2f41baa 100644 --- a/src/test/java/com/michelin/ns4kafka/services/AccessControlEntryServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/AccessControlEntryServiceTest.java @@ -1,145 +1,154 @@ package com.michelin.ns4kafka.services; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertLinesMatch; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.repositories.AccessControlEntryRepository; import io.micronaut.context.ApplicationContext; +import java.util.List; +import java.util.Optional; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.when; - +/** + * Access control entry service test. 
+ */ @ExtendWith(MockitoExtension.class) class AccessControlEntryServiceTest { @Mock AccessControlEntryRepository accessControlEntryRepository; + @Mock ApplicationContext applicationContext; - NamespaceService namespaceService = Mockito.mock(NamespaceService.class); + + @Mock + NamespaceService namespaceService; @InjectMocks AccessControlEntryService accessControlEntryService; @Test - void validate_NotAllowedResources() { + void validateNotAllowedResources() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); - AccessControlEntry badACL = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-name") - .namespace("namespace") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("test") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); + + AccessControlEntry badAcl = AccessControlEntry.builder() + .metadata(ObjectMeta.builder() + .name("acl-name") + .namespace("namespace") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("test") + .grantedTo("target-ns") + .build()) + .build(); when(applicationContext.getBean(NamespaceService.class)) - .thenReturn(namespaceService); + .thenReturn(namespaceService); when(namespaceService.findByName("target-ns")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of()); - List actual = accessControlEntryService.validate(badACL, ns); + .thenReturn(List.of()); + List actual = accessControlEntryService.validate(badAcl, ns); assertLinesMatch(List.of( "^Invalid value CONNECT for resourceType.*", "^Invalid value OWNER for permission.*", "^Invalid value target-ns for grantedTo.*", "^Invalid grant PREFIXED:.*"), - actual); + actual); } @Test - void validate_NotAllowedSelfGrant() { + void validateNotAllowedSelfGrant() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); - AccessControlEntry badACL = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-name") - .namespace("namespace") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("test") - .grantedTo("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); + + AccessControlEntry badAcl = AccessControlEntry.builder() + .metadata(ObjectMeta.builder() + .name("acl-name") + .namespace("namespace") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ) + .resource("test") + .grantedTo("namespace") + .build()) + .build(); + 
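One substantive change in this hunk, beyond re-indentation: the hand-rolled Mockito.mock(NamespaceService.class) field becomes a @Mock field, so MockitoExtension owns every collaborator uniformly, and the test methods drop their leading capitals, presumably to satisfy the new checkstyle method-naming rule. A self-contained sketch of the pattern, using hypothetical Greeter/NameRepository types:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.mockito.Mockito.when;

    import java.util.List;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.extension.ExtendWith;
    import org.mockito.InjectMocks;
    import org.mockito.Mock;
    import org.mockito.junit.jupiter.MockitoExtension;

    @ExtendWith(MockitoExtension.class)
    class GreeterTest {
        @Mock
        NameRepository nameRepository; // created by the extension, no Mockito.mock(...) call

        @InjectMocks
        Greeter greeter; // constructed with the mock injected

        @Test
        void shouldGreetKnownUser() { // lowerCamelCase, as a MethodName checkstyle rule expects
            when(nameRepository.findAll()).thenReturn(List.of("alice"));
            assertEquals("Hello alice", greeter.greet("alice"));
        }
    }

    interface NameRepository {
        List<String> findAll();
    }

    class Greeter {
        private final NameRepository nameRepository;

        Greeter(NameRepository nameRepository) {
            this.nameRepository = nameRepository;
        }

        String greet(String name) {
            return nameRepository.findAll().contains(name) ? "Hello " + name : "Unknown";
        }
    }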
when(applicationContext.getBean(NamespaceService.class)) - .thenReturn(namespaceService); + .thenReturn(namespaceService); when(namespaceService.findByName("namespace")) - .thenReturn(Optional.of(ns)); + .thenReturn(Optional.of(ns)); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of()); - List actual = accessControlEntryService.validate(badACL, ns); + .thenReturn(List.of()); + List actual = accessControlEntryService.validate(badAcl, ns); assertLinesMatch(List.of( "^Invalid value namespace for grantedTo.*", "^Invalid grant PREFIXED:.*"), - actual); + actual); } @Test - void validate_NotAllowedOwnerOfBadPrefix() { + void validateNotAllowedOwnerOfBadPrefix() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); AccessControlEntry accessControlEntry = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-name") - .namespace("namespace") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("main") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-name") + .namespace("namespace") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ) + .resource("main") + .grantedTo("target-ns") + .build()) + .build(); when(applicationContext.getBean(NamespaceService.class)) - .thenReturn(namespaceService); + .thenReturn(namespaceService); when(namespaceService.findByName("target-ns")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("main.sub") - .grantedTo("namespace") - .build() - ) - .build() - )); + .thenReturn(List.of(AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("main.sub") + .grantedTo("namespace") + .build() + ) + .build() + )); List actual = accessControlEntryService.validate(accessControlEntry, ns); assertLinesMatch(List.of("^Invalid grant PREFIXED:.*"), actual); } @@ -147,40 +156,40 @@ void validate_NotAllowedOwnerOfBadPrefix() { @Test void validate_NotAllowedOwnerOfBadLiteral() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); AccessControlEntry accessControlEntry = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-name") - .namespace("namespace") - .build()) - 
.spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.READ) - .resource("resource2") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-name") + .namespace("namespace") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.READ) + .resource("resource2") + .grantedTo("target-ns") + .build()) + .build(); when(applicationContext.getBean(NamespaceService.class)) - .thenReturn(namespaceService); + .thenReturn(namespaceService); when(namespaceService.findByName("target-ns")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("resource1") - .grantedTo("namespace") - .build() - ) - .build() - )); + .thenReturn(List.of(AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("resource1") + .grantedTo("namespace") + .build() + ) + .build() + )); List actual = accessControlEntryService.validate(accessControlEntry, ns); assertLinesMatch(List.of("^Invalid grant LITERAL:.*"), actual); } @@ -188,40 +197,40 @@ void validate_NotAllowedOwnerOfBadLiteral() { @Test void validate_AllowedOwnerOfLiteral() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); AccessControlEntry accessControlEntry = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-name") - .namespace("namespace") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.READ) - .resource("resource1") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-name") + .namespace("namespace") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.READ) + .resource("resource1") + .grantedTo("target-ns") + .build()) + .build(); when(applicationContext.getBean(NamespaceService.class)) - .thenReturn(namespaceService); + .thenReturn(namespaceService); when(namespaceService.findByName("target-ns")) - .thenReturn(Optional.of(Namespace.builder().build())); + .thenReturn(Optional.of(Namespace.builder().build())); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(AccessControlEntry.builder() - 
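The assertLinesMatch calls in these validation tests rely on a JUnit 5 behavior that is easy to miss: each expected line is compared for equality first and, failing that, compiled as a regular expression, which is why the expected values read like "^Invalid value ... .*" patterns. A tiny runnable illustration (the message text is hypothetical):

    import static org.junit.jupiter.api.Assertions.assertLinesMatch;

    import java.util.List;
    import org.junit.jupiter.api.Test;

    class AssertLinesMatchExampleTest {
        @Test
        void expectedLinesDoubleAsRegexes() {
            List<String> actual = List.of(
                "Invalid value CONNECT for resourceType: allowed values are [TOPIC, GROUP]");

            // Not equal to the actual line, so JUnit falls back to regex matching.
            assertLinesMatch(
                List.of("^Invalid value CONNECT for resourceType.*"),
                actual);
        }
    }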
.spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("resource1") - .grantedTo("namespace") - .build() - ) - .build() - )); + .thenReturn(List.of(AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("resource1") + .grantedTo("namespace") + .build() + ) + .build() + )); List actual = accessControlEntryService.validate(accessControlEntry, ns); assertTrue(actual.isEmpty()); } @@ -229,40 +238,41 @@ void validate_AllowedOwnerOfLiteral() { @Test void validate_AllowedOwnerOfPrefix() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); AccessControlEntry accessControlEntry = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-name") - .namespace("namespace") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("main.sub") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-name") + .namespace("namespace") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ) + .resource("main.sub") + .grantedTo("target-ns") + .build()) + .build(); when(applicationContext.getBean(NamespaceService.class)) - .thenReturn(namespaceService); + .thenReturn(namespaceService); when(namespaceService.findByName("target-ns")) - .thenReturn(Optional.of(Namespace.builder().metadata(ObjectMeta.builder().name("target-ns").build()).build())); + .thenReturn( + Optional.of(Namespace.builder().metadata(ObjectMeta.builder().name("target-ns").build()).build())); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("main") - .grantedTo("namespace") - .build() - ) - .build() - )); + .thenReturn(List.of(AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("main") + .grantedTo("namespace") + .build() + ) + .build() + )); List actual = accessControlEntryService.validate(accessControlEntry, ns); assertTrue(actual.isEmpty()); } @@ -270,76 +280,77 @@ void validate_AllowedOwnerOfPrefix() { @Test void validate_AllowedPublicGrantedTo() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + 
.metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); AccessControlEntry accessControlEntry = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-name") - .namespace("namespace") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("main.sub") - .grantedTo("*") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-name") + .namespace("namespace") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ) + .resource("main.sub") + .grantedTo("*") + .build()) + .build(); when(applicationContext.getBean(NamespaceService.class)) - .thenReturn(namespaceService); + .thenReturn(namespaceService); when(namespaceService.findByName("*")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("main") - .grantedTo("namespace") - .build() - ) - .build() - )); + .thenReturn(List.of(AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("main") + .grantedTo("namespace") + .build() + ) + .build() + )); List actual = accessControlEntryService.validate(accessControlEntry, ns); assertTrue(actual.isEmpty()); } @Test - void validateAsAdmin_SuccessUpdatingExistingACL() { + void validateAsAdminSuccessUpdatingExistingAcl() { AccessControlEntry accessControlEntry = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-name") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.READ) - .resource("main.sub") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-name") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.READ) + .resource("main.sub") + .grantedTo("target-ns") + .build()) + .build(); Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("target-ns") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("target-ns") + .cluster("local") + .build()) + .build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(accessControlEntry)); + .thenReturn(List.of(accessControlEntry)); List actual = accessControlEntryService.validateAsAdmin(accessControlEntry, namespace); 
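Nearly every changed line in this file is a Lombok builder chain, which is what makes the re-indentation so noisy: each nested metadata/spec object opens its own builder. A stripped-down sketch of the shape, with hypothetical Acl/Meta/Spec types standing in for the real models (requires Lombok on the classpath):

    import lombok.Builder;
    import lombok.Value;

    @Value
    @Builder
    class Acl {
        Meta metadata;
        Spec spec;

        @Value
        @Builder
        static class Meta {
            String name;
            String namespace;
            String cluster;
        }

        @Value
        @Builder
        static class Spec {
            String resource;
            String grantedTo;
        }

        public static void main(String[] args) {
            // Each nested object is built in place, hence the deep chains in the diff.
            Acl acl = Acl.builder()
                .metadata(Meta.builder().name("acl-name").namespace("target-ns").cluster("local").build())
                .spec(Spec.builder().resource("main.sub").grantedTo("target-ns").build())
                .build();
            System.out.println(acl);
        }
    }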
assertTrue(actual.isEmpty()); } + @Test - void validateAsAdmin_FailSameOverlap() { + void validateAsAdminFailSameOverlap() { // another namespace is already OWNER of PREFIXED or LITERAL resource // exemple : // if already exists: @@ -357,69 +368,69 @@ void validateAsAdmin_FailSameOverlap() { // namespace2 OWNER:LITERAL:project2 OK 8 // namespace2 OWNER:LITERAL:proj OK 9 AccessControlEntry existing1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing1") - .namespace("other-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project1") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing1") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project1") + .grantedTo("other-ns") + .build()) + .build(); AccessControlEntry existing2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing2") - .namespace("other-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project2_t1") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing2") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project2_t1") + .grantedTo("other-ns") + .build()) + .build(); Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("target-ns") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("target-ns") + .cluster("local") + .build()) + .build(); AccessControlEntry toCreate1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project1") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project1") + .grantedTo("target-ns") + .build()) + .build(); AccessControlEntry toCreate2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - 
.spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project2_t1") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project2_t1") + .grantedTo("target-ns") + .build()) + .build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(existing1, existing2)); + .thenReturn(List.of(existing1, existing2)); // Test 1 List actual = accessControlEntryService.validateAsAdmin(toCreate1, namespace); @@ -429,6 +440,7 @@ void validateAsAdmin_FailSameOverlap() { actual = accessControlEntryService.validateAsAdmin(toCreate2, namespace); assertEquals(1, actual.size()); } + @Test void validateAsAdmin_FailParentOverlap() { // another namespace is already OWNER of PREFIXED or LITERAL resource @@ -448,69 +460,69 @@ void validateAsAdmin_FailParentOverlap() { // namespace2 OWNER:LITERAL:project2 OK 8 // namespace2 OWNER:LITERAL:proj OK 9 AccessControlEntry existing1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing1") - .namespace("other-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project1") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing1") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project1") + .grantedTo("other-ns") + .build()) + .build(); AccessControlEntry existing2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing2") - .namespace("other-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project2_t1") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing2") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project2_t1") + .grantedTo("other-ns") + .build()) + .build(); Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("target-ns") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("target-ns") + .cluster("local") + .build()) + .build(); AccessControlEntry toCreate1 = AccessControlEntry.builder() - 
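The KO/OK table quoted in the test comment encodes the overlap rule these admin-validation tests exercise: two OWNER grants collide when either pattern can capture a resource of the other. A compact sketch of that predicate, with a hypothetical Grant type and the simplifying assumption that only OWNER grants are compared (the real check also weighs resource types and clusters):

    final class OverlapCheck {
        enum PatternType { PREFIXED, LITERAL }

        record Grant(PatternType type, String resource) {}

        static boolean overlaps(Grant existing, Grant candidate) {
            if (existing.type() == PatternType.PREFIXED && candidate.type() == PatternType.PREFIXED) {
                // "proj" vs "project1": one prefix is a prefix of the other.
                return existing.resource().startsWith(candidate.resource())
                    || candidate.resource().startsWith(existing.resource());
            }
            if (existing.type() == PatternType.PREFIXED) {
                // A LITERAL candidate falling under an existing prefix.
                return candidate.resource().startsWith(existing.resource());
            }
            if (candidate.type() == PatternType.PREFIXED) {
                // An existing literal falling under the candidate prefix.
                return existing.resource().startsWith(candidate.resource());
            }
            return existing.resource().equals(candidate.resource());
        }

        public static void main(String[] args) {
            Grant prefixedProject1 = new Grant(PatternType.PREFIXED, "project1");
            // KO: a shorter prefix captures project1.
            System.out.println(overlaps(prefixedProject1, new Grant(PatternType.PREFIXED, "proj")));
            // OK against this grant (it would still collide with an existing LITERAL:project2_t1).
            System.out.println(overlaps(prefixedProject1, new Grant(PatternType.LITERAL, "project2_t1")));
        }
    }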
.metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("proj") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("proj") + .grantedTo("target-ns") + .build()) + .build(); AccessControlEntry toCreate2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project2") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project2") + .grantedTo("target-ns") + .build()) + .build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(existing1, existing2)); + .thenReturn(List.of(existing1, existing2)); // Test 1 List actual = accessControlEntryService.validateAsAdmin(toCreate1, namespace); @@ -520,6 +532,7 @@ void validateAsAdmin_FailParentOverlap() { actual = accessControlEntryService.validateAsAdmin(toCreate2, namespace); assertEquals(1, actual.size()); } + @Test void validateAsAdmin_FailChildOverlap() { // another namespace is already OWNER of PREFIXED or LITERAL resource @@ -539,69 +552,69 @@ void validateAsAdmin_FailChildOverlap() { // namespace2 OWNER:LITERAL:project2 OK 8 // namespace2 OWNER:LITERAL:proj OK 9 AccessControlEntry existing1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing1") - .namespace("other-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project1") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing1") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project1") + .grantedTo("other-ns") + .build()) + .build(); AccessControlEntry existing2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing2") - .namespace("other-ns") - .cluster("local") - .build()) - 
.spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project2_t1") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing2") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project2_t1") + .grantedTo("other-ns") + .build()) + .build(); Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("target-ns") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("target-ns") + .cluster("local") + .build()) + .build(); AccessControlEntry toCreate1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project1_sub") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project1_sub") + .grantedTo("target-ns") + .build()) + .build(); AccessControlEntry toCreate2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project1_t1") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project1_t1") + .grantedTo("target-ns") + .build()) + .build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(existing1, existing2)); + .thenReturn(List.of(existing1, existing2)); // Test 1 List actual = accessControlEntryService.validateAsAdmin(toCreate1, namespace); @@ -633,109 +646,109 @@ void validateAsAdmin_Success() { // namespace2 OWNER:LITERAL:project2 OK 8 <<<<<<<< // namespace2 OWNER:LITERAL:proj OK 9 <<<<<<<< AccessControlEntry existing1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing1") - .namespace("other-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - 
.resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project1") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing1") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project1") + .grantedTo("other-ns") + .build()) + .build(); AccessControlEntry existing2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing2") - .namespace("other-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project2_t1") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing2") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project2_t1") + .grantedTo("other-ns") + .build()) + .build(); AccessControlEntry existing3 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-existing2") - .namespace("other-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("p") - .grantedTo("other-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-existing2") + .namespace("other-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("p") + .grantedTo("other-ns") + .build()) + .build(); Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("target-ns") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("target-ns") + .cluster("local") + .build()) + .build(); AccessControlEntry toCreate1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("project3_topic1_sub") - .grantedTo("target-ns") - .build()) - .build(); - AccessControlEntry toCreate2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - 
.permission(AccessControlEntry.Permission.OWNER) - .resource("project2") - .grantedTo("target-ns") - .build()) - .build(); - AccessControlEntry toCreate3 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder() - .name("acl-tocreate") - .namespace("target-ns") - .cluster("local") - .build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("proj") - .grantedTo("target-ns") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project3_topic1_sub") + .grantedTo("target-ns") + .build()) + .build(); + when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(existing1, existing2, existing3)); + .thenReturn(List.of(existing1, existing2, existing3)); - // Test 1 List actual = accessControlEntryService.validateAsAdmin(toCreate1, namespace); assertTrue(actual.isEmpty()); - // Test 2 + AccessControlEntry toCreate2 = AccessControlEntry.builder() + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("project2") + .grantedTo("target-ns") + .build()) + .build(); + actual = accessControlEntryService.validateAsAdmin(toCreate2, namespace); assertTrue(actual.isEmpty()); - // Test 3 + AccessControlEntry toCreate3 = AccessControlEntry.builder() + .metadata(ObjectMeta.builder() + .name("acl-tocreate") + .namespace("target-ns") + .cluster("local") + .build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("proj") + .grantedTo("target-ns") + .build()) + .build(); + actual = accessControlEntryService.validateAsAdmin(toCreate3, namespace); assertTrue(actual.isEmpty()); } @@ -743,18 +756,18 @@ void validateAsAdmin_Success() { @Test void findAllGrantedToNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("namespace1").build()).build(); + .metadata(ObjectMeta.builder().name("namespace1").build()).build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); AccessControlEntry ace2 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); AccessControlEntry ace3 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); + 
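The findAllGrantedToNamespace test below expects three of the four entries back: the two addressed to namespace1 plus the public "*" grant. A sketch of that filter under the assumption of a pared-down Grant record (the real service filters full AccessControlEntry objects):

    import java.util.List;

    final class GrantLookup {
        record Grant(String grantedTo) {}

        static List<Grant> grantedTo(String namespace, List<Grant> all) {
            return all.stream()
                .filter(g -> g.grantedTo().equals(namespace) || g.grantedTo().equals("*"))
                .toList();
        }

        public static void main(String[] args) {
            List<Grant> all = List.of(new Grant("namespace1"), new Grant("namespace1"),
                new Grant("namespace2"), new Grant("*"));
            System.out.println(grantedTo("namespace1", all).size()); // 3, as in the test
        }
    }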
.spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); AccessControlEntry ace4 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("*").build()).build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("*").build()).build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(ace1, ace2, ace3, ace4)); + .thenReturn(List.of(ace1, ace2, ace3, ace4)); List actual = accessControlEntryService.findAllGrantedToNamespace(ns); assertEquals(3, actual.size()); } @@ -762,16 +775,16 @@ void findAllGrantedToNamespace() { @Test void findAllPublicGrantedTo() { AccessControlEntry ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); AccessControlEntry ace2 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); AccessControlEntry ace3 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); AccessControlEntry ace4 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("*").build()).build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("*").build()).build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(ace1, ace2, ace3, ace4)); + .thenReturn(List.of(ace1, ace2, ace3, ace4)); List actual = accessControlEntryService.findAllPublicGrantedTo(); assertEquals(1, actual.size()); } @@ -779,19 +792,19 @@ void findAllPublicGrantedTo() { @Test void findAllForNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder().name("namespace1").build()).build(); + .metadata(ObjectMeta.builder().name("namespace1").build()).build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); + .metadata(ObjectMeta.builder().namespace("namespace1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); AccessControlEntry ace2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); + .metadata(ObjectMeta.builder().namespace("namespace1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); AccessControlEntry ace3 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace2").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); + .metadata(ObjectMeta.builder().namespace("namespace2").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(ace1, ace2, ace3)); + .thenReturn(List.of(ace1, ace2, ace3)); List actual = 
accessControlEntryService.findAllForNamespace(ns); assertEquals(2, actual.size()); } @@ -799,17 +812,17 @@ void findAllForNamespace() { @Test void findAll() { AccessControlEntry ace1 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace1").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); + .metadata(ObjectMeta.builder().namespace("namespace1").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace1").build()).build(); AccessControlEntry ace2 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace2").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); + .metadata(ObjectMeta.builder().namespace("namespace2").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace2").build()).build(); AccessControlEntry ace3 = AccessControlEntry.builder() - .metadata(ObjectMeta.builder().namespace("namespace3").build()) - .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace3").build()).build(); + .metadata(ObjectMeta.builder().namespace("namespace3").build()) + .spec(AccessControlEntry.AccessControlEntrySpec.builder().grantedTo("namespace3").build()).build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(ace1, ace2, ace3)); + .thenReturn(List.of(ace1, ace2, ace3)); List actual = accessControlEntryService.findAll(); assertEquals(3, actual.size()); } @@ -817,56 +830,56 @@ void findAll() { @Test void isNamespaceOwnerOfResource() { AccessControlEntry ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("main") - .grantedTo("namespace") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("main") + .grantedTo("namespace") + .build() + ) + .build(); AccessControlEntry ace2 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("connect") - .grantedTo("namespace") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("connect") + .grantedTo("namespace") + .build() + ) + .build(); AccessControlEntry ace3 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.WRITE) - .resource("connect") - .grantedTo("namespace-other") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + 
.permission(AccessControlEntry.Permission.WRITE) + .resource("connect") + .grantedTo("namespace-other") + .build() + ) + .build(); when(accessControlEntryRepository.findAll()) - .thenReturn(List.of(ace1, ace2, ace3)); + .thenReturn(List.of(ace1, ace2, ace3)); assertTrue( - accessControlEntryService.isNamespaceOwnerOfResource("namespace", - AccessControlEntry.ResourceType.CONNECT, - "connect")); + accessControlEntryService.isNamespaceOwnerOfResource("namespace", + AccessControlEntry.ResourceType.CONNECT, + "connect")); assertTrue( - accessControlEntryService.isNamespaceOwnerOfResource("namespace", - AccessControlEntry.ResourceType.TOPIC, - "main")); + accessControlEntryService.isNamespaceOwnerOfResource("namespace", + AccessControlEntry.ResourceType.TOPIC, + "main")); assertTrue( - accessControlEntryService.isNamespaceOwnerOfResource("namespace", - AccessControlEntry.ResourceType.TOPIC, - "main.sub"), "subresource"); + accessControlEntryService.isNamespaceOwnerOfResource("namespace", + AccessControlEntry.ResourceType.TOPIC, + "main.sub"), "subresource"); Assertions.assertFalse( - accessControlEntryService.isNamespaceOwnerOfResource("namespace-other", - AccessControlEntry.ResourceType.TOPIC, - "main")); + accessControlEntryService.isNamespaceOwnerOfResource("namespace-other", + AccessControlEntry.ResourceType.TOPIC, + "main")); Assertions.assertFalse( - accessControlEntryService.isNamespaceOwnerOfResource("namespace-other", - AccessControlEntry.ResourceType.CONNECT, - "connect")); + accessControlEntryService.isNamespaceOwnerOfResource("namespace-other", + AccessControlEntry.ResourceType.CONNECT, + "connect")); } } diff --git a/src/test/java/com/michelin/ns4kafka/services/ConnectClusterServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/ConnectClusterServiceTest.java index 058a7d43..98537cd0 100644 --- a/src/test/java/com/michelin/ns4kafka/services/ConnectClusterServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/ConnectClusterServiceTest.java @@ -1,12 +1,21 @@ package com.michelin.ns4kafka.services; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.config.SecurityConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.connect.cluster.ConnectCluster; import com.michelin.ns4kafka.models.connect.cluster.VaultResponse; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; +import com.michelin.ns4kafka.properties.SecurityProperties; import com.michelin.ns4kafka.repositories.ConnectClusterRepository; import com.michelin.ns4kafka.services.clients.connect.KafkaConnectClient; import com.michelin.ns4kafka.services.clients.connect.entities.ServerInfo; @@ -15,6 +24,11 @@ import io.micronaut.http.MutableHttpRequest; import io.micronaut.http.client.HttpClient; import io.micronaut.http.client.exceptions.HttpClientException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Stream; import 
org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -24,18 +38,9 @@ import reactor.core.publisher.Mono; import reactor.test.StepVerifier; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Stream; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - +/** + * Connect cluster service test. + */ @ExtendWith(MockitoExtension.class) class ConnectClusterServiceTest { @Mock @@ -48,10 +53,10 @@ class ConnectClusterServiceTest { AccessControlEntryService accessControlEntryService; @Mock - List kafkaAsyncExecutorConfigList; + List managedClusterPropertiesList; @Mock - SecurityConfig securityConfig; + SecurityProperties securityProperties; @InjectMocks ConnectClusterService connectClusterService; @@ -59,268 +64,257 @@ class ConnectClusterServiceTest { @Mock HttpClient httpClient; - /** - * Test find all - */ @Test void findAllEmpty() { when(connectClusterRepository.findAll()).thenReturn(List.of()); - + StepVerifier.create(connectClusterService.findAll(false)) .verifyComplete(); } - /** - * Test find all - */ @Test void findAll() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); when(connectClusterRepository.findAll()).thenReturn(List.of(connectCluster)); when(kafkaConnectClient.version(any(), any())) - .thenReturn(Mono.just(HttpResponse.ok())); + .thenReturn(Mono.just(HttpResponse.ok())); StepVerifier.create(connectClusterService.findAll(false)) .consumeNextWith(result -> assertEquals(connectCluster, result)) .verifyComplete(); } - /** - * Test find all - */ @Test void shouldFindAllIncludingHardDeclared() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); when(connectClusterRepository.findAll()).thenReturn(new ArrayList<>(List.of(connectCluster))); - KafkaAsyncExecutorConfig kafka = new KafkaAsyncExecutorConfig("local"); - kafka.setConnects(Map.of("test-connect", new KafkaAsyncExecutorConfig.ConnectConfig())); - when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of(kafka)); + ManagedClusterProperties kafka = new ManagedClusterProperties("local"); + kafka.setConnects(Map.of("test-connect", new ManagedClusterProperties.ConnectProperties())); + when(managedClusterPropertiesList.stream()).thenReturn(Stream.of(kafka)); when(kafkaConnectClient.version(any(), any())) - .thenReturn(Mono.just(HttpResponse.ok())) - .thenReturn(Mono.error(new Exception("error"))); + .thenReturn(Mono.just(HttpResponse.ok())) + .thenReturn(Mono.error(new Exception("error"))); StepVerifier.create(connectClusterService.findAll(true)) - .consumeNextWith(result -> { - assertEquals("connect-cluster", 
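The import block here also records the rename from config to properties classes: SecurityConfig becomes SecurityProperties and KafkaAsyncExecutorConfig becomes ManagedClusterProperties. A sketch of what such a Micronaut properties bean typically looks like, assuming a "ns4kafka.security" prefix and only the adminGroup field these tests touch (both assumptions, not the real class):

    import io.micronaut.context.annotation.ConfigurationProperties;

    // Mutable bean bound from configuration; the tests stub getAdminGroup() on a mock of it.
    @ConfigurationProperties("ns4kafka.security")
    class SecurityProperties {
        private String adminGroup;

        public String getAdminGroup() {
            return adminGroup;
        }

        public void setAdminGroup(String adminGroup) {
            this.adminGroup = adminGroup;
        }
    }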
result.getMetadata().getName()); - assertEquals(ConnectCluster.Status.HEALTHY, result.getSpec().getStatus()); - assertNull(result.getSpec().getStatusMessage()); - }) - .consumeNextWith(result -> { - assertEquals("test-connect", result.getMetadata().getName()); - assertEquals(ConnectCluster.Status.IDLE, result.getSpec().getStatus()); - assertEquals("error", result.getSpec().getStatusMessage()); - }) - .verifyComplete(); + .consumeNextWith(result -> { + assertEquals("connect-cluster", result.getMetadata().getName()); + assertEquals(ConnectCluster.Status.HEALTHY, result.getSpec().getStatus()); + assertNull(result.getSpec().getStatusMessage()); + }) + .consumeNextWith(result -> { + assertEquals("test-connect", result.getMetadata().getName()); + assertEquals(ConnectCluster.Status.IDLE, result.getSpec().getStatus()); + assertEquals("error", result.getSpec().getStatusMessage()); + }) + .verifyComplete(); } - /** - * Test find all for namespace - */ @Test void findAllForNamespace() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); ConnectCluster connectClusterTwo = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix2.connect-two") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix2.connect-two") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); ConnectCluster connectClusterThree = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix3.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix3.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); ConnectCluster connectClusterFour = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("not-owner") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("not-owner") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster, connectClusterTwo, connectClusterThree, connectClusterFour)); + .thenReturn(List.of(connectCluster, connectClusterTwo, connectClusterThree, connectClusterFour)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - 
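These reactive tests drive the service with reactor-test's StepVerifier rather than blocking on the publisher. A minimal round trip over the operators used in the hunks above (create, consumeNextWith, expectNext, verifyComplete), with a plain Flux standing in for the service call:

    import reactor.core.publisher.Flux;
    import reactor.test.StepVerifier;

    final class StepVerifierSketch {
        public static void main(String[] args) {
            Flux<String> clusters = Flux.just("connect-cluster", "test-connect");

            StepVerifier.create(clusters)
                // Arbitrary assertions per element; any AssertionError fails the verification.
                .consumeNextWith(name -> {
                    if (!"connect-cluster".equals(name)) {
                        throw new AssertionError("unexpected first element: " + name);
                    }
                })
                .expectNext("test-connect")
                .verifyComplete(); // also asserts the publisher completed normally
        }
    }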
.resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix2.connect-two") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.READ) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix3.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("topic.") - .build()) - .build() - )); - - List actual = connectClusterService.findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.OWNER)); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix2.connect-two") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.READ) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix3.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("topic.") + .build()) + .build() + )); + + List actual = + connectClusterService.findAllByNamespace(namespace, List.of(AccessControlEntry.Permission.OWNER)); assertEquals(2, actual.size()); - - assertTrue(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("prefix.connect-cluster"))); - assertTrue(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("prefix2.connect-two"))); - - Assertions.assertFalse(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("not-owner"))); - Assertions.assertFalse(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("prefix3.connect-cluster"))); + + assertTrue( + actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("prefix.connect-cluster"))); + assertTrue( + actual.stream().anyMatch(connector -> 
connector.getMetadata().getName().equals("prefix2.connect-two"))); + + Assertions.assertFalse( + actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("not-owner"))); + Assertions.assertFalse( + actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("prefix3.connect-cluster"))); } - /** - * Test find by namespace and name - */ @Test void findByNamespaceAndName() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("prefix.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); when(kafkaConnectClient.version("local", "prefix.connect-cluster")) - .thenReturn(Mono.just(HttpResponse.ok())); + .thenReturn(Mono.just(HttpResponse.ok())); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build() - )); - - Optional<ConnectCluster> actual = connectClusterService.findByNamespaceAndNameOwner(namespace, "prefix.connect-cluster"); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); + + Optional<ConnectCluster> actual = + connectClusterService.findByNamespaceAndNameOwner(namespace, "prefix.connect-cluster"); assertTrue(actual.isPresent()); assertEquals("prefix.connect-cluster", actual.get().getMetadata().getName()); assertEquals(ConnectCluster.Status.HEALTHY, actual.get().getSpec().getStatus()); } - /** - * Test find by namespace and name when Kafka Connect is unhealthy - */ @Test void findByNamespaceAndNameUnhealthy() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("prefix.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder() +
.name("prefix.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); when(kafkaConnectClient.version("local", "prefix.connect-cluster")) - .thenReturn(Mono.error(new HttpClientException("Internal Server Error"))); + .thenReturn(Mono.error(new HttpClientException("Internal Server Error"))); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build() - )); - - Optional actual = connectClusterService.findByNamespaceAndNameOwner(namespace, "prefix.connect-cluster"); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); + + Optional actual = + connectClusterService.findByNamespaceAndNameOwner(namespace, "prefix.connect-cluster"); assertTrue(actual.isPresent()); assertEquals("prefix.connect-cluster", actual.get().getMetadata().getName()); @@ -328,66 +322,61 @@ void findByNamespaceAndNameUnhealthy() { assertEquals("Internal Server Error", actual.get().getSpec().getStatusMessage()); } - /** - * Test find by namespace and name empty response - */ @Test void findByNamespaceAndNameEmpty() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("prefix.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); when(kafkaConnectClient.version("local", "prefix.connect-cluster")) - .thenReturn(Mono.just(HttpResponse.ok())); + .thenReturn(Mono.just(HttpResponse.ok())); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build() - )); - - Optional actual = 
connectClusterService.findByNamespaceAndNameOwner(namespace, "does-not-exist"); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); + + Optional<ConnectCluster> actual = + connectClusterService.findByNamespaceAndNameOwner(namespace, "does-not-exist"); assertTrue(actual.isEmpty()); } - /** - * Test creation - */ @Test void create() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); when(connectClusterRepository.create(connectCluster)).thenReturn(connectCluster); @@ -395,26 +384,22 @@ void create() { assertEquals(actual, connectCluster); } - - /** - * Test creation with encrypted credentials - */ @Test void createCredentialsEncrypted() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .username("myUsername") - .password("myPassword") - .aes256Key("myAES256Key") - .aes256Salt("myAES256Salt") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .username("myUsername") + .password("myPassword") + .aes256Key("myAES256Key") + .aes256Salt("myAES256Salt") + .build()) + .build(); when(connectClusterRepository.create(connectCluster)).thenReturn(connectCluster); - when(securityConfig.getAes256EncryptionKey()).thenReturn("changeitchangeitchangeitchangeit"); + when(securityProperties.getAes256EncryptionKey()).thenReturn("changeitchangeitchangeitchangeit"); connectClusterService.create(connectCluster); @@ -423,284 +408,278 @@ void createCredentialsEncrypted() { assertNotEquals("myAES256Salt", connectCluster.getSpec().getAes256Salt()); } - /** - * Test validate connect cluster creation when Connect cluster is already defined in the - * Ns4Kafka configuration - */ @Test void validateConnectClusterCreationAlreadyDefined() { - ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("test-connect") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .build()) - .build(); - - KafkaAsyncExecutorConfig kafka = new KafkaAsyncExecutorConfig("local"); - kafka.setConnects(Map.of("test-connect", new KafkaAsyncExecutorConfig.ConnectConfig())); - when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of(kafka)); + ManagedClusterProperties kafka = new ManagedClusterProperties("local"); + kafka.setConnects(Map.of("test-connect", new ManagedClusterProperties.ConnectProperties())); + when(managedClusterPropertiesList.stream()).thenReturn(Stream.of(kafka)); when(httpClient.retrieve(any(MutableHttpRequest.class), eq(ServerInfo.class))) - .thenReturn(Mono.just(ServerInfo.builder().build())); + .thenReturn(Mono.just(ServerInfo.builder().build())); + + ConnectCluster connectCluster =
ConnectCluster.builder() + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .build()) + .build(); StepVerifier.create(connectClusterService.validateConnectClusterCreation(connectCluster)) - .consumeNextWith(errors -> { - assertEquals(1L, errors.size()); - assertEquals("A Kafka Connect is already defined globally with the name \"test-connect\". Please provide a different name.", errors.get(0)); - }) - .verifyComplete(); + .consumeNextWith(errors -> { + assertEquals(1L, errors.size()); + assertEquals( + "A Kafka Connect is already defined globally with the name \"test-connect\"." + + " Please provide a different name.", + errors.get(0)); + }) + .verifyComplete(); } - /** - * Test validate connect cluster creation when Connect cluster is down - */ @Test void validateConnectClusterCreationDown() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("test-connect") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .username("username") - .password("password") - .build()) - .build(); - - when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of()); + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .username("username") + .password("password") + .build()) + .build(); + + when(managedClusterPropertiesList.stream()).thenReturn(Stream.of()); when(httpClient.retrieve(any(MutableHttpRequest.class), eq(ServerInfo.class))) - .thenReturn(Mono.error(new HttpClientException("Error"))); + .thenReturn(Mono.error(new HttpClientException("Error"))); StepVerifier.create(connectClusterService.validateConnectClusterCreation(connectCluster)) - .consumeNextWith(errors -> { - assertEquals(1L, errors.size()); - assertEquals("The Kafka Connect \"test-connect\" is not healthy (Error).", errors.get(0)); - }) - .verifyComplete(); + .consumeNextWith(errors -> { + assertEquals(1L, errors.size()); + assertEquals("The Kafka Connect \"test-connect\" is not healthy (Error).", errors.get(0)); + }) + .verifyComplete(); } - /** - * Test validate connect cluster creation malformed URL - */ @Test void validateConnectClusterCreationMalformedUrl() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("test-connect") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("malformed-url") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("malformed-url") + .build()) + .build(); - when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of()); + when(managedClusterPropertiesList.stream()).thenReturn(Stream.of()); StepVerifier.create(connectClusterService.validateConnectClusterCreation(connectCluster)) - .consumeNextWith(errors -> { - assertEquals(1L, errors.size()); - assertEquals("The Kafka Connect \"test-connect\" has a malformed URL \"malformed-url\".", errors.get(0)); - }) - .verifyComplete(); + .consumeNextWith(errors -> { + assertEquals(1L, errors.size()); + assertEquals("The Kafka Connect \"test-connect\" has a malformed URL \"malformed-url\".", + errors.get(0)); + }) + .verifyComplete(); } /** * Test validate connect cluster creation when aes 256 configuration missing salt. 
*/ @Test - void validateConnectClusterCreationBadAES256MissingSalt() { + void validateConnectClusterCreationBadAes256MissingSalt() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("test-connect") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .username("username") - .password("password") - .aes256Key("aes256Key") - .build()) - .build(); - - when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of()); + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .username("username") + .password("password") + .aes256Key("aes256Key") + .build()) + .build(); + + when(managedClusterPropertiesList.stream()).thenReturn(Stream.of()); when(httpClient.retrieve(any(MutableHttpRequest.class), eq(ServerInfo.class))) - .thenReturn(Mono.just(ServerInfo.builder().build())); + .thenReturn(Mono.just(ServerInfo.builder().build())); StepVerifier.create(connectClusterService.validateConnectClusterCreation(connectCluster)) - .consumeNextWith(errors -> { - assertEquals(1L, errors.size()); - assertEquals("The Connect cluster \"test-connect\" \"aes256Key\" and \"aes256Salt\" specs are required to activate the encryption.", errors.get(0)); - }) - .verifyComplete(); + .consumeNextWith(errors -> { + assertEquals(1L, errors.size()); + assertEquals( + "The Connect cluster \"test-connect\" \"aes256Key\" and \"aes256Salt\" specs are required" + + " to activate the encryption.", + errors.get(0)); + }) + .verifyComplete(); } /** * Test validate connect cluster creation when aes 256 configuration missing key. */ @Test - void validateConnectClusterCreationBadAES256MissingKey() { + void validateConnectClusterCreationBadAes256MissingKey() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("test-connect") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .username("username") - .password("password") - .aes256Salt("aes256Salt") - .build()) - .build(); - - when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of()); + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .username("username") + .password("password") + .aes256Salt("aes256Salt") + .build()) + .build(); + + when(managedClusterPropertiesList.stream()).thenReturn(Stream.of()); when(httpClient.retrieve(any(MutableHttpRequest.class), eq(ServerInfo.class))) - .thenReturn(Mono.just(ServerInfo.builder().build())); + .thenReturn(Mono.just(ServerInfo.builder().build())); StepVerifier.create(connectClusterService.validateConnectClusterCreation(connectCluster)) - .consumeNextWith(errors -> { - assertEquals(1L, errors.size()); - assertEquals("The Connect cluster \"test-connect\" \"aes256Key\" and \"aes256Salt\" specs are required to activate the encryption.", errors.get(0)); - }) - .verifyComplete(); + .consumeNextWith(errors -> { + assertEquals(1L, errors.size()); + assertEquals( + "The Connect cluster \"test-connect\" \"aes256Key\" and \"aes256Salt\" specs are required " + + "to activate the encryption.", + errors.get(0)); + }) + .verifyComplete(); } - /** - * Test validate connect cluster creation when Connect cluster is down and encryption key is missing - */ @Test void validateConnectClusterCreationDownWithMissingKey() { ConnectCluster connectCluster = ConnectCluster.builder() - 
.metadata(ObjectMeta.builder().name("test-connect") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .url("https://after") - .username("username") - .password("password") - .aes256Salt("aes256Salt") - .build()) - .build(); - - when(kafkaAsyncExecutorConfigList.stream()).thenReturn(Stream.of()); + .metadata(ObjectMeta.builder().name("test-connect") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .url("https://after") + .username("username") + .password("password") + .aes256Salt("aes256Salt") + .build()) + .build(); + + when(managedClusterPropertiesList.stream()).thenReturn(Stream.of()); when(httpClient.retrieve(any(MutableHttpRequest.class), eq(ServerInfo.class))) - .thenReturn(Mono.error(new HttpClientException("Error"))); + .thenReturn(Mono.error(new HttpClientException("Error"))); StepVerifier.create(connectClusterService.validateConnectClusterCreation(connectCluster)) - .consumeNextWith(errors -> { - assertEquals(2L, errors.size()); - assertTrue(errors.contains("The Kafka Connect \"test-connect\" is not healthy (Error).")); - assertTrue(errors.contains("The Connect cluster \"test-connect\" \"aes256Key\" and \"aes256Salt\" specs are required to activate the encryption.")); - }) - .verifyComplete(); + .consumeNextWith(errors -> { + assertEquals(2L, errors.size()); + assertTrue(errors.contains("The Kafka Connect \"test-connect\" is not healthy (Error).")); + assertTrue(errors.contains( + "The Connect cluster \"test-connect\" \"aes256Key\" and \"aes256Salt\" specs are required" + + " to activate the encryption.")); + }) + .verifyComplete(); } - /** - * Test validate connect cluster vault when No Connect cluster are available for namespace - */ @Test void validateConnectClusterVaultNoClusterAvailable() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key("aes256Key") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key("aes256Key") + .build()) + .build(); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("fake-prefix.") - .build()) - .build() - )); - - List<String> errors = connectClusterService.validateConnectClusterVault(namespace, "prefix.fake-connect-cluster"); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) +
.resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("fake-prefix.") + .build()) + .build() + )); + + List<String> errors = + connectClusterService.validateConnectClusterVault(namespace, "prefix.fake-connect-cluster"); assertEquals(1L, errors.size()); assertEquals("No Connect Cluster available.", errors.get(0)); } - /** - * Test validate connect cluster vault when namespace does not have any Connect Cluster - * with valid aes256 specs. - */ @Test - void validateConnectClusterVaultNoClusterAvailableWithAES256() { + void validateConnectClusterVaultNoClusterAvailableWithAes256() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster1 = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix1.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix1.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .build()) + .build(); ConnectCluster connectCluster2 = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix2.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key("aes256Key") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix2.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key("aes256Key") + .build()) + .build(); ConnectCluster connectCluster3 = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix3.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Salt("aes256Salt") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix3.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Salt("aes256Salt") + .build()) + .build(); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster1, connectCluster2, connectCluster3)); + .thenReturn(List.of(connectCluster1, connectCluster2, connectCluster3)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix1.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix2.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix3.") - .build()) - .build() - )); - - List<String> errors =
connectClusterService.validateConnectClusterVault(namespace, "prefix1.fake-connect-cluster"); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix1.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix2.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix3.") + .build()) + .build() + )); + + List<String> errors = + connectClusterService.validateConnectClusterVault(namespace, "prefix1.fake-connect-cluster"); assertEquals(1L, errors.size()); assertEquals("No Connect cluster available with valid aes256 specs configuration.", errors.get(0)); @@ -713,161 +692,157 @@ void validateConnectClusterVaultNoClusterAvailableWithAES256() { @Test void validateConnectClusterVaultClusterNotAvailable() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster1 = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix1.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix1.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .build()) + .build(); ConnectCluster connectCluster2 = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix2.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key("aes256Key") - .aes256Salt("aes256Salt") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix2.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key("aes256Key") + .aes256Salt("aes256Salt") + .build()) + .build(); ConnectCluster connectCluster3 = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix3.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key("aes256Key") - .aes256Salt("aes256Salt") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix3.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key("aes256Key") + .aes256Salt("aes256Salt") + .build()) + .build(); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster1, connectCluster2, connectCluster3)); + .thenReturn(List.of(connectCluster1, connectCluster2, connectCluster3)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( -
AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix1.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix2.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix3.") - .build()) - .build() - )); - - List<String> errors = connectClusterService.validateConnectClusterVault(namespace, "prefix1.fake-connect-cluster"); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix1.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix2.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix3.") + .build()) + .build() + )); + + List<String> errors = + connectClusterService.validateConnectClusterVault(namespace, "prefix1.fake-connect-cluster"); assertEquals(1L, errors.size()); - assertEquals("Invalid value \"prefix1.fake-connect-cluster\" for Connect Cluster: Value must be one of [" + - "prefix2.connect-cluster, prefix3.connect-cluster].", errors.get(0)); + assertEquals("Invalid value \"prefix1.fake-connect-cluster\" for Connect Cluster: Value must be one of [" + + "prefix2.connect-cluster, prefix3.connect-cluster].", errors.get(0)); } - /** - * Test validate connect cluster vault.
- */ @Test void validateConnectClusterVault() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key("aes256Key") - .aes256Salt("aes256Salt") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key("aes256Key") + .aes256Salt("aes256Salt") + .build()) + .build(); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build() - )); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); List<String> errors = connectClusterService.validateConnectClusterVault(namespace, "prefix.connect-cluster"); assertEquals(0L, errors.size()); } - /** - * Test vault password if no connect cluster with aes256 config define.
- */ @Test void vaultPasswordNoConnectClusterWithAes256Config() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key("aes256Key") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key("aes256Key") + .build()) + .build(); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build() - )); - - List<VaultResponse> actual = connectClusterService.vaultPassword(namespace, "prefix.connect-cluster", List.of("secret")); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); + + List<VaultResponse> actual = + connectClusterService.vaultPassword(namespace, "prefix.connect-cluster", List.of("secret")); assertEquals("secret", actual.get(0).getSpec().getEncrypted()); } @@ -879,67 +854,67 @@ void vaultPasswordNoConnectClusterWithAes256Config() { void findAllByNamespaceWriteAsOwner() { String encryptKey = "changeitchangeitchangeitchangeit"; Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("prefix.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .password(EncryptionUtils.encryptAES256GCM("password", encryptKey)) - .aes256Key(EncryptionUtils.encryptAES256GCM("aes256Key", encryptKey)) - .aes256Salt(EncryptionUtils.encryptAES256GCM("aes256Salt", encryptKey)) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .password(EncryptionUtils.encryptAes256Gcm("password", encryptKey)) + .aes256Key(EncryptionUtils.encryptAes256Gcm("aes256Key", encryptKey)) + .aes256Salt(EncryptionUtils.encryptAes256Gcm("aes256Salt", encryptKey)) + .build()) + .build(); ConnectCluster connectClusterOwner = ConnectCluster.builder() - .metadata(ObjectMeta.builder() -
.name("owner.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .password(EncryptionUtils.encryptAES256GCM("password", encryptKey)) - .aes256Key(EncryptionUtils.encryptAES256GCM("aes256Key", encryptKey)) - .aes256Salt(EncryptionUtils.encryptAES256GCM("aes256Salt", encryptKey)) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("owner.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .password(EncryptionUtils.encryptAes256Gcm("password", encryptKey)) + .aes256Key(EncryptionUtils.encryptAes256Gcm("aes256Key", encryptKey)) + .aes256Salt(EncryptionUtils.encryptAes256Gcm("aes256Salt", encryptKey)) + .build()) + .build(); when(kafkaConnectClient.version(any(), any())) - .thenReturn(Mono.just(HttpResponse.ok())); + .thenReturn(Mono.just(HttpResponse.ok())); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster, connectClusterOwner)); + .thenReturn(List.of(connectCluster, connectClusterOwner)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("owner.") - .build()) - .build() - )); - - when(securityConfig.getAes256EncryptionKey()).thenReturn(encryptKey); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("owner.") + .build()) + .build() + )); + + when(securityProperties.getAes256EncryptionKey()).thenReturn(encryptKey); List actual = connectClusterService.findAllByNamespaceWrite(namespace); assertEquals(2, actual.size()); @@ -954,41 +929,35 @@ void findAllByNamespaceWriteAsOwner() { assertEquals("*****", actual.get(1).getSpec().getAes256Salt()); } - /** - * Should delete a self-deployed Kafka Connect - */ @Test void shouldDelete() { ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("prefix.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .build()) + .build(); connectClusterService.delete(connectCluster); 
verify(connectClusterRepository).delete(connectCluster); } - /** - * Should namespace be owner of Kafka Connect - */ @Test void shouldNamespaceOwnerOfConnectCluster() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); when(accessControlEntryService.isNamespaceOwnerOfResource(any(), any(), any())) - .thenReturn(true); + .thenReturn(true); boolean actual = connectClusterService.isNamespaceOwnerOfConnectCluster(namespace, "prefix.connect-cluster"); @@ -998,59 +967,59 @@ void shouldNamespaceOwnerOfConnectCluster() { @Test void shouldNamespaceAllowedForConnectCluster() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("prefix.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .build()) + .build(); ConnectCluster connectClusterOwner = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("owner.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("owner.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .build()) + .build(); when(kafkaConnectClient.version(any(), any())) - .thenReturn(Mono.just(HttpResponse.ok())); + .thenReturn(Mono.just(HttpResponse.ok())); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster, connectClusterOwner)); + .thenReturn(List.of(connectCluster, connectClusterOwner)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("owner.") - .build()) - .build() - )); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build(), + 
AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("owner.") + .build()) + .build() + )); boolean actual = connectClusterService.isNamespaceAllowedForConnectCluster(namespace, "prefix.connect-cluster"); @@ -1060,61 +1029,62 @@ void shouldNamespaceAllowedForConnectCluster() { @Test void shouldNamespaceNotAllowedForConnectCluster() { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("prefix.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .build()) + .build(); ConnectCluster connectClusterOwner = ConnectCluster.builder() - .metadata(ObjectMeta.builder() - .name("owner.connect-cluster") - .cluster("local") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("owner.connect-cluster") + .cluster("local") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .build()) + .build(); when(kafkaConnectClient.version(any(), any())) - .thenReturn(Mono.just(HttpResponse.ok())); + .thenReturn(Mono.just(HttpResponse.ok())); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster, connectClusterOwner)); + .thenReturn(List.of(connectCluster, connectClusterOwner)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("owner.") - .build()) - .build() - )); - - boolean actual = connectClusterService.isNamespaceAllowedForConnectCluster(namespace, "not-allowed-prefix.connect-cluster"); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") 
+ .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("owner.") + .build()) + .build() + )); + + boolean actual = + connectClusterService.isNamespaceAllowedForConnectCluster(namespace, "not-allowed-prefix.connect-cluster"); Assertions.assertFalse(actual); } @@ -1127,42 +1097,43 @@ void vaultPasswordWithoutFormat() { String encryptionKey = "changeitchangeitchangeitchangeit"; Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix.connect-cluster") - .build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key(EncryptionUtils.encryptAES256GCM("aes256Key", encryptionKey)) - .aes256Salt(EncryptionUtils.encryptAES256GCM("aes256Salt", encryptionKey)) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key(EncryptionUtils.encryptAes256Gcm("aes256Key", encryptionKey)) + .aes256Salt(EncryptionUtils.encryptAes256Gcm("aes256Salt", encryptionKey)) + .build()) + .build(); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build() - )); - - when(securityConfig.getAes256EncryptionKey()).thenReturn("changeitchangeitchangeitchangeit"); - - List<VaultResponse> actual = connectClusterService.vaultPassword(namespace, "prefix.connect-cluster", List.of("secret")); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); + + when(securityProperties.getAes256EncryptionKey()).thenReturn("changeitchangeitchangeitchangeit"); + + List<VaultResponse> actual = + connectClusterService.vaultPassword(namespace, "prefix.connect-cluster", List.of("secret")); assertTrue(actual.get(0).getSpec().getEncrypted().matches("^\\$\\{aes256\\:.*\\}")); } @@ -1175,43 +1146,44 @@ void vaultPasswordWithFormat() { String encryptionKey = "changeitchangeitchangeitchangeit"; Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("prefix.connect-cluster") -
.build()) - .spec(ConnectCluster.ConnectClusterSpec.builder() - .aes256Key(EncryptionUtils.encryptAES256GCM("aes256Key", encryptionKey)) - .aes256Salt(EncryptionUtils.encryptAES256GCM("aes256Salt", encryptionKey)) - .aes256Format("%s") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("prefix.connect-cluster") + .build()) + .spec(ConnectCluster.ConnectClusterSpec.builder() + .aes256Key(EncryptionUtils.encryptAes256Gcm("aes256Key", encryptionKey)) + .aes256Salt(EncryptionUtils.encryptAes256Gcm("aes256Salt", encryptionKey)) + .aes256Format("%s") + .build()) + .build(); when(connectClusterRepository.findAllForCluster("local")) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) - .resource("prefix.") - .build()) - .build() - )); - - when(securityConfig.getAes256EncryptionKey()).thenReturn("changeitchangeitchangeitchangeit"); - - List<VaultResponse> actual = connectClusterService.vaultPassword(namespace, "prefix.connect-cluster", List.of("secret")); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT_CLUSTER) + .resource("prefix.") + .build()) + .build() + )); + + when(securityProperties.getAes256EncryptionKey()).thenReturn("changeitchangeitchangeitchangeit"); + + List<VaultResponse> actual = + connectClusterService.vaultPassword(namespace, "prefix.connect-cluster", List.of("secret")); Assertions.assertFalse(actual.get(0).getSpec().getEncrypted().matches("^\\$\\{aes256\\:.*\\}")); } diff --git a/src/test/java/com/michelin/ns4kafka/services/ConnectorServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/ConnectorServiceTest.java index 7a8cf9e2..67da839e 100644 --- a/src/test/java/com/michelin/ns4kafka/services/ConnectorServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/ConnectorServiceTest.java @@ -1,5 +1,14 @@ package com.michelin.ns4kafka.services; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Namespace.NamespaceSpec; @@ -8,7 +17,12 @@ import com.michelin.ns4kafka.models.connector.Connector; import com.michelin.ns4kafka.repositories.ConnectorRepository; import com.michelin.ns4kafka.services.clients.connect.KafkaConnectClient; -import com.michelin.ns4kafka.services.clients.connect.entities.*; +import com.michelin.ns4kafka.services.clients.connect.entities.ConfigInfo; +import com.michelin.ns4kafka.services.clients.connect.entities.ConfigInfos; +import com.michelin.ns4kafka.services.clients.connect.entities.ConfigKeyInfo; +import
com.michelin.ns4kafka.services.clients.connect.entities.ConfigValueInfo; +import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorPluginInfo; +import com.michelin.ns4kafka.services.clients.connect.entities.ConnectorType; import com.michelin.ns4kafka.services.executors.ConnectorAsyncExecutor; import com.michelin.ns4kafka.validation.ConnectValidator; import com.michelin.ns4kafka.validation.ResourceValidator; @@ -17,6 +31,9 @@ import io.micronaut.http.HttpStatus; import io.micronaut.http.client.exceptions.HttpClientResponseException; import io.micronaut.inject.qualifiers.Qualifiers; +import java.util.List; +import java.util.Map; +import java.util.Optional; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -28,14 +45,6 @@ import reactor.core.publisher.Mono; import reactor.test.StepVerifier; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.*; - @ExtendWith(MockitoExtension.class) class ConnectorServiceTest { @Mock @@ -56,111 +65,105 @@ class ConnectorServiceTest { @Mock ConnectClusterService connectClusterService; - /** - * Test to find all connectors by namespace when there is no connector - */ @Test void findByNamespaceNone() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); when(connectorRepository.findAllForCluster("local")) - .thenReturn(List.of()); + .thenReturn(List.of()); List<Connector> actual = connectorService.findAllForNamespace(ns); assertTrue(actual.isEmpty()); } - /** - * Test to find all connectors by namespace - */ @Test void findByNamespaceMultiple() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Connector c1 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns-connect1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-connect1").build()) + .build(); Connector c2 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns-connect2").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-connect2").build()) + .build(); Connector c3 = Connector.builder() - .metadata(ObjectMeta.builder().name("other-connect1").build()) - .build(); + .metadata(ObjectMeta.builder().name("other-connect1").build()) + .build(); Connector c4 = Connector.builder() - .metadata(ObjectMeta.builder().name("other-connect2").build()) - .build(); + .metadata(ObjectMeta.builder().name("other-connect2").build()) + .build(); Connector c5 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns2-connect1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns2-connect1").build()) + .build(); when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of( -
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns-")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("other-connect1")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
-                                        .resource("ns-")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.READ)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns2-")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.WRITE)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns3-")
-                                        .build())
-                                .build()
-                ));
+            .thenReturn(List.of(
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns-")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("other-connect1")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
+                        .resource("ns-")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.READ)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns2-")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.WRITE)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns3-")
+                        .build())
+                    .build()
+            ));
 
         when(connectorRepository.findAllForCluster("local"))
-                .thenReturn(List.of(c1, c2, c3, c4, c5));
+            .thenReturn(List.of(c1, c2, c3, c4, c5));
 
         List<Connector> actual = connectorService.findAllForNamespace(ns);
 
@@ -170,102 +173,99 @@ void findByNamespaceMultiple() {
         assertTrue(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns-connect2")));
         assertTrue(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("other-connect1")));
         // doesn't contain
-        Assertions.assertFalse(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("other-connect2")));
-        Assertions.assertFalse(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns2-connect1")));
-        Assertions.assertFalse(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns3-connect1")));
+        Assertions.assertFalse(
+            actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("other-connect2")));
+        Assertions.assertFalse(
+            actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns2-connect1")));
+        Assertions.assertFalse(
+            actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns3-connect1")));
     }
 
-    /**
-     * Test to find a given connector that does not exist
-     */
     @Test
     void findByNameNotFound() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
         when(connectorRepository.findAllForCluster("local"))
-                .thenReturn(List.of());
+            .thenReturn(List.of());
 
         Optional<Connector> actual = connectorService.findByName(ns, "ns-connect1");
 
         assertTrue(actual.isEmpty());
     }
 
-    /**
-     * Test to find a given connector
-     */
     @Test
     void findByNameFound() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
         Connector c1 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect1").build())
+            .build();
 
         Connector c2 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect2").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect2").build())
+            .build();
 
         Connector c3 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("other-connect1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("other-connect1").build())
+            .build();
 
         when(accessControlEntryService.findAllGrantedToNamespace(ns))
-                .thenReturn(List.of(
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns-")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("other-connect1")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
-                                        .resource("ns-")
-                                        .build())
-                                .build()
-                ));
+            .thenReturn(List.of(
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns-")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("other-connect1")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
+                        .resource("ns-")
+                        .build())
+                    .build()
+            ));
 
         when(accessControlEntryService.findAllGrantedToNamespace(ns))
-                .thenReturn(List.of(AccessControlEntry.builder()
-                        .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                .permission(AccessControlEntry.Permission.OWNER)
-                                .grantedTo("namespace")
-                                .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                .resource("ns-")
-                                .build())
-                        .build()));
+            .thenReturn(List.of(AccessControlEntry.builder()
+                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                    .permission(AccessControlEntry.Permission.OWNER)
+                    .grantedTo("namespace")
+                    .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                    .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                    .resource("ns-")
+                    .build())
+                .build()));
 
         when(connectorRepository.findAllForCluster("local"))
-                .thenReturn(List.of(c1, c2, c3));
+            .thenReturn(List.of(c1, c2, c3));
 
         Optional<Connector> actual = connectorService.findByName(ns, "ns-connect1");
 
@@ -273,58 +273,55 @@ void findByNameFound() {
         assertEquals("ns-connect1", actual.get().getMetadata().getName());
     }
 
-    /**
-     * Test find all by namespace and connect cluster
-     */
    @Test
    void findAllByConnectCluster() {
        Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
        Connector c1 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect1").build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("connect-cluster")
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect1").build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("connect-cluster")
+                .build())
+            .build();
 
        Connector c2 = Connector.builder()
.metadata(ObjectMeta.builder().name("ns-connect2").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("connect-cluster2") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-connect2").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster2") + .build()) + .build(); Connector c3 = Connector.builder() - .metadata(ObjectMeta.builder().name("other-connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("connect-cluster3") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("other-connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster3") + .build()) + .build(); Connector c4 = Connector.builder() - .metadata(ObjectMeta.builder().name("other-connect2").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("connect-cluster4") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("other-connect2").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster4") + .build()) + .build(); Connector c5 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns2-connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("connect-cluster5") - .build()) - .build(); + .metadata(ObjectMeta.builder().name("ns2-connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("connect-cluster5") + .build()) + .build(); when(connectorRepository.findAllForCluster("local")) - .thenReturn(List.of(c1, c2, c3, c4, c5)); + .thenReturn(List.of(c1, c2, c3, c4, c5)); List actual = connectorService.findAllByConnectCluster(ns, "connect-cluster"); @@ -332,176 +329,171 @@ void findAllByConnectCluster() { assertTrue(actual.stream().anyMatch(connector -> connector.getMetadata().getName().equals("ns-connect1"))); } - /** - * Test to validate the configuration of a connector when the KConnect cluster is invalid - */ @Test void validateLocallyInvalidConnectCluster() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("wrong") - .config(Map.of("connector.class", "Test")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("wrong") + .config(Map.of("connector.class", "Test")) + .build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectValidator(ConnectValidator.builder() - .validationConstraints(Map.of()) - .sourceValidationConstraints(Map.of()) - .sinkValidationConstraints(Map.of()) - .classValidationConstraints(Map.of()) - .build()) - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectValidator(ConnectValidator.builder() + .validationConstraints(Map.of()) + .sourceValidationConstraints(Map.of()) + .sinkValidationConstraints(Map.of()) + .classValidationConstraints(Map.of()) + .build()) + .connectClusters(List.of("local-name")) + .build()) + .build(); when(connectClusterService.findAllByNamespaceWrite(ns)).thenReturn(List.of()); StepVerifier.create(connectorService.validateLocally(ns, connector)) .consumeNextWith(response -> { assertEquals(1, response.size()); - assertEquals("Invalid value wrong for spec.connectCluster: Value must be one of 
[local-name]", response.get(0)); + assertEquals("Invalid value wrong for spec.connectCluster: Value must be one of [local-name]", + response.get(0)); }) .verifyComplete(); } - /** - * Test to validate the configuration of a connector when the class name is missing - */ @Test void validateLocallyNoClassName() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("local-name") - .config(Map.of()) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("local-name") + .config(Map.of()) + .build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); StepVerifier.create(connectorService.validateLocally(ns, connector)) .consumeNextWith(response -> { assertEquals(1, response.size()); - assertEquals("Invalid value for spec.config.'connector.class': Value must be non-null", response.get(0)); + assertEquals("Invalid value for spec.config.'connector.class': Value must be non-null", + response.get(0)); }) .verifyComplete(); } - /** - * Test to validate the configuration of a connector when the class name is invalid - */ @Test void validateLocallyInvalidClassName() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("local-name") - .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("local-name") + .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) + .build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); when(kafkaConnectClient.connectPlugins("local", "local-name")) - .thenReturn(Mono.just(List.of())); + .thenReturn(Mono.just(List.of())); StepVerifier.create(connectorService.validateLocally(ns, connector)) .consumeNextWith(response -> { assertEquals(1, response.size()); - assertEquals("Failed to find any class that implements Connector and which name matches org.apache.kafka.connect.file.FileStreamSinkConnector", response.get(0)); + assertEquals( + "Failed to find any class that implements Connector and which name matches " + + "org.apache.kafka.connect.file.FileStreamSinkConnector", + response.get(0)); }) .verifyComplete(); } - /** - * Test to validate the configuration of a connector when a field should not be null - */ @Test void validateLocallyValidationErrors() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("local-name") - 
.config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("local-name") + .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) + .build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectValidator(ConnectValidator.builder() - .validationConstraints(Map.of("missing.field", new ResourceValidator.NonEmptyString())) - .sinkValidationConstraints(Map.of()) - .sourceValidationConstraints(Map.of()) - .classValidationConstraints(Map.of()) - .build()) - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectValidator(ConnectValidator.builder() + .validationConstraints(Map.of("missing.field", new ResourceValidator.NonEmptyString())) + .sinkValidationConstraints(Map.of()) + .sourceValidationConstraints(Map.of()) + .classValidationConstraints(Map.of()) + .build()) + .connectClusters(List.of("local-name")) + .build()) + .build(); when(kafkaConnectClient.connectPlugins("local", "local-name")) - .thenReturn(Mono.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1")))); + .thenReturn(Mono.just(List.of( + new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, + "v1")))); StepVerifier.create(connectorService.validateLocally(ns, connector)) - .consumeNextWith(response -> { - assertEquals(1, response.size()); - assertEquals("Invalid value null for configuration missing.field: Value must be non-null", response.get(0)); - }) - .verifyComplete(); + .consumeNextWith(response -> { + assertEquals(1, response.size()); + assertEquals("Invalid value null for configuration missing.field: Value must be non-null", + response.get(0)); + }) + .verifyComplete(); } - /** - * Test to validate the configuration of a connector - */ @Test void validateLocallySuccess() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("local-name") - .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("local-name") + .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) + .build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectValidator(ConnectValidator.builder() - .classValidationConstraints(Map.of()) - .sinkValidationConstraints(Map.of()) - .sourceValidationConstraints(Map.of()) - .validationConstraints(Map.of()) - .build()) - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectValidator(ConnectValidator.builder() + .classValidationConstraints(Map.of()) + .sinkValidationConstraints(Map.of()) + .sourceValidationConstraints(Map.of()) + 
+                    .validationConstraints(Map.of())
+                    .build())
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
         when(kafkaConnectClient.connectPlugins("local", "local-name"))
-                .thenReturn(Mono.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1"))));
+            .thenReturn(Mono.just(List.of(
+                new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK,
+                    "v1"))));
 
         StepVerifier.create(connectorService.validateLocally(ns, connector))
             .consumeNextWith(response -> assertTrue(response.isEmpty()))
@@ -511,25 +503,27 @@ void validateLocallySuccess() {
     @Test
     void validateLocallySuccessWithNoConstraint() {
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder().name("connect1").build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("local-name")
-                        .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder().name("connect1").build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("local-name")
+                .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector"))
+                .build())
+            .build();
 
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
         when(kafkaConnectClient.connectPlugins("local", "local-name"))
-                .thenReturn(Mono.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1"))));
+            .thenReturn(Mono.just(List.of(
+                new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK,
+                    "v1"))));
 
         StepVerifier.create(connectorService.validateLocally(ns, connector))
             .consumeNextWith(response -> assertTrue(response.isEmpty()))
@@ -539,30 +533,32 @@ void validateLocallySuccessWithNoConstraint() {
     @Test
     void validateLocallySuccessWithNoValidationConstraint() {
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder().name("connect1").build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("local-name")
-                        .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder().name("connect1").build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("local-name")
+                .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector"))
+                .build())
+            .build();
 
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(Namespace.NamespaceSpec.builder()
-                        .connectValidator(ConnectValidator.builder()
-                                .classValidationConstraints(Map.of())
-                                .sinkValidationConstraints(Map.of())
-                                .sourceValidationConstraints(Map.of())
-                                .build())
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .connectValidator(ConnectValidator.builder()
+                    .classValidationConstraints(Map.of())
+                    .sinkValidationConstraints(Map.of())
+                    .sourceValidationConstraints(Map.of())
+                    .build())
.connectClusters(List.of("local-name")) + .build()) + .build(); when(kafkaConnectClient.connectPlugins("local", "local-name")) - .thenReturn(Mono.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1")))); + .thenReturn(Mono.just(List.of( + new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, + "v1")))); StepVerifier.create(connectorService.validateLocally(ns, connector)) .consumeNextWith(response -> assertTrue(response.isEmpty())) @@ -572,110 +568,108 @@ void validateLocallySuccessWithNoValidationConstraint() { @Test void validateLocallySuccessNoSinkValidationConstraint() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("local-name") - .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("local-name") + .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) + .build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectValidator(ConnectValidator.builder() - .classValidationConstraints(Map.of()) - .sourceValidationConstraints(Map.of()) - .validationConstraints(Map.of()) - .build()) - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectValidator(ConnectValidator.builder() + .classValidationConstraints(Map.of()) + .sourceValidationConstraints(Map.of()) + .validationConstraints(Map.of()) + .build()) + .connectClusters(List.of("local-name")) + .build()) + .build(); when(kafkaConnectClient.connectPlugins("local", "local-name")) - .thenReturn(Mono.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1")))); + .thenReturn(Mono.just(List.of( + new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, + "v1")))); StepVerifier.create(connectorService.validateLocally(ns, connector)) .consumeNextWith(response -> assertTrue(response.isEmpty())) .verifyComplete(); } - /** - * Test to validate the configuration of a connector - */ @Test void validateLocallySuccessWithSelfDeployedConnectCluster() { Connector connector = Connector.builder() - .metadata(ObjectMeta.builder().name("connect1").build()) - .spec(Connector.ConnectorSpec.builder() - .connectCluster("local-name") - .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("local-name") + .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) + .build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectValidator(ConnectValidator.builder() - .classValidationConstraints(Map.of()) - .sinkValidationConstraints(Map.of()) - .sourceValidationConstraints(Map.of()) - 
-                                .validationConstraints(Map.of())
-                                .build())
-                        .connectClusters(List.of())
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(Namespace.NamespaceSpec.builder()
+                .connectValidator(ConnectValidator.builder()
+                    .classValidationConstraints(Map.of())
+                    .sinkValidationConstraints(Map.of())
+                    .sourceValidationConstraints(Map.of())
+                    .validationConstraints(Map.of())
+                    .build())
+                .connectClusters(List.of())
+                .build())
+            .build();
 
         when(connectClusterService.findAllByNamespaceWrite(ns)).thenReturn(List.of(ConnectCluster.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("local-name")
-                        .build())
-                .build()));
+            .metadata(ObjectMeta.builder()
+                .name("local-name")
+                .build())
+            .build()));
 
         when(kafkaConnectClient.connectPlugins("local", "local-name"))
-                .thenReturn(Mono.just(List.of(new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK, "v1"))));
+            .thenReturn(Mono.just(List.of(
+                new ConnectorPluginInfo("org.apache.kafka.connect.file.FileStreamSinkConnector", ConnectorType.SINK,
+                    "v1"))));
 
         StepVerifier.create(connectorService.validateLocally(ns, connector))
             .consumeNextWith(response -> assertTrue(response.isEmpty()))
             .verifyComplete();
     }
 
-    /**
-     * Test to invalidate the configuration of a connector against the KConnect cluster
-     */
     @Test
     void validateRemotelyErrors() {
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder().name("connect1").build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("local-name")
-                        .config(Map.of("connector.class", "com.michelin.NoClass"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder().name("connect1").build())
+            .spec(Connector.ConnectorSpec.builder()
+                .connectCluster("local-name")
+                .config(Map.of("connector.class", "com.michelin.NoClass"))
+                .build())
+            .build();
 
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
         ConfigInfos configInfos = new ConfigInfos("name", 1, List.of(),
-                List.of(new ConfigInfo(new ConfigKeyInfo(null, null, false, null, null, null, null, 0, null, null, null),
-                        new ConfigValueInfo(null, null, null, List.of("error_message"), true))));
+            List.of(new ConfigInfo(new ConfigKeyInfo(null, null, false, null, null, null, null, 0, null, null, null),
+                new ConfigValueInfo(null, null, null, List.of("error_message"), true))));
 
         when(kafkaConnectClient.validate(
-                ArgumentMatchers.eq("local"),
-                ArgumentMatchers.eq("local-name"),
-                ArgumentMatchers.any(),
-                ArgumentMatchers.any()))
-                .thenReturn(Mono.just(configInfos));
+            ArgumentMatchers.eq("local"),
+            ArgumentMatchers.eq("local-name"),
+            ArgumentMatchers.any(),
+            ArgumentMatchers.any()))
+            .thenReturn(Mono.just(configInfos));
 
         StepVerifier.create(connectorService.validateRemotely(ns, connector))
             .consumeNextWith(response -> {
@@ -685,125 +679,124 @@ void validateRemotelyErrors() {
             .verifyComplete();
     }
 
-    /**
-     * Test to validate the configuration of a connector against the KConnect cluster
-     */
     @Test
     void validateRemotelySuccess() {
         Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder().name("connect1").build())
-                .spec(Connector.ConnectorSpec.builder()
-                        .connectCluster("local-name")
.config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) - .build()) - .build(); + .metadata(ObjectMeta.builder().name("connect1").build()) + .spec(Connector.ConnectorSpec.builder() + .connectCluster("local-name") + .config(Map.of("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector")) + .build()) + .build(); Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ConfigInfos configInfos = new ConfigInfos("name", 1, List.of(), List.of()); when(kafkaConnectClient.validate( - ArgumentMatchers.eq("local"), - ArgumentMatchers.eq("local-name"), - ArgumentMatchers.any(), - ArgumentMatchers.any())) - .thenReturn(Mono.just(configInfos)); + ArgumentMatchers.eq("local"), + ArgumentMatchers.eq("local-name"), + ArgumentMatchers.any(), + ArgumentMatchers.any())) + .thenReturn(Mono.just(configInfos)); StepVerifier.create(connectorService.validateRemotely(ns, connector)) .consumeNextWith(response -> assertTrue(response.isEmpty())) .verifyComplete(); } - /** - * Test the listing of unsynchronized connectors when they are all unsynchronized - */ @Test void listUnsynchronizedNoExistingConnectors() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ConnectorAsyncExecutor connectorAsyncExecutor = mock(ConnectorAsyncExecutor.class); when(applicationContext.getBean(ConnectorAsyncExecutor.class, - Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(connectorAsyncExecutor); + Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(connectorAsyncExecutor); ConnectCluster connectCluster = ConnectCluster.builder() - .metadata(ObjectMeta.builder().name("ns-connect-cluster").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-connect-cluster").build()) + .build(); Connector c1 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns-connect1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-connect1").build()) + .build(); Connector c2 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns-connect2").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-connect2").build()) + .build(); Connector c3 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns1-connect1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns1-connect1").build()) + .build(); Connector c4 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns2-connect1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns2-connect1").build()) + .build(); Connector c5 = Connector.builder() - .metadata(ObjectMeta.builder().name("ns1-connect2").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns1-connect2").build()) + .build(); when(connectClusterService.findAllByNamespaceWrite(ns)) - .thenReturn(List.of(connectCluster)); + .thenReturn(List.of(connectCluster)); 
when(connectorAsyncExecutor.collectBrokerConnectors("local-name")) - .thenReturn(Flux.fromIterable(List.of(c1, c2, c3, c4))); + .thenReturn(Flux.fromIterable(List.of(c1, c2, c3, c4))); when(connectorAsyncExecutor.collectBrokerConnectors("ns-connect-cluster")) - .thenReturn(Flux.fromIterable(List.of(c5))); + .thenReturn(Flux.fromIterable(List.of(c5))); // list of existing Ns4Kafka access control entries - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns-connect1")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns-connect2")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns1-connect1")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns1-connect2")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns2-connect1")) - .thenReturn(false); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, + "ns-connect1")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, + "ns-connect2")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, + "ns1-connect1")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, + "ns1-connect2")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, + "ns2-connect1")) + .thenReturn(false); when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resource("ns-") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resource("ns1-connect1") - .build()) - .build())); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resource("ns-") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resource("ns1-connect1") + .build()) + .build())); // no connects exists into Ns4Kafka when(connectorRepository.findAllForCluster("local")) - .thenReturn(List.of()); + 
+            .thenReturn(List.of());
 
         StepVerifier.create(connectorService.listUnsynchronizedConnectors(ns))
             .consumeNextWith(connector -> assertEquals("ns-connect1", connector.getMetadata().getName()))
@@ -813,178 +806,181 @@ void listUnsynchronizedNoExistingConnectors() {
             .verifyComplete();
     }
 
-    /**
-     * Test the listing of unsynchronized connectors when they are all synchronized
-     */
     @Test
     void listUnsynchronizedAllExistingConnectors() {
         Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
         ConnectorAsyncExecutor connectorAsyncExecutor = mock(ConnectorAsyncExecutor.class);
         when(applicationContext.getBean(ConnectorAsyncExecutor.class,
-                Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(connectorAsyncExecutor);
+            Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(connectorAsyncExecutor);
 
         ConnectCluster connectCluster = ConnectCluster.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect-cluster").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect-cluster").build())
+            .build();
 
         Connector c1 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect1").build())
+            .build();
 
         Connector c2 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect2").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect2").build())
+            .build();
 
         Connector c3 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns1-connect1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns1-connect1").build())
+            .build();
 
         Connector c4 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns2-connect1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns2-connect1").build())
+            .build();
 
         Connector c5 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns1-connect2").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns1-connect2").build())
+            .build();
 
         when(connectClusterService.findAllByNamespaceWrite(ns))
-                .thenReturn(List.of(connectCluster));
+            .thenReturn(List.of(connectCluster));
 
         when(connectorAsyncExecutor.collectBrokerConnectors("local-name"))
-                .thenReturn(Flux.fromIterable(List.of(c1, c2, c3, c4)));
+            .thenReturn(Flux.fromIterable(List.of(c1, c2, c3, c4)));
         when(connectorAsyncExecutor.collectBrokerConnectors("ns-connect-cluster"))
-                .thenReturn(Flux.fromIterable(List.of(c5)));
+            .thenReturn(Flux.fromIterable(List.of(c5)));
 
         when(connectorRepository.findAllForCluster("local"))
-                .thenReturn(List.of(c1, c2, c3, c4, c5));
+            .thenReturn(List.of(c1, c2, c3, c4, c5));
 
         // list of existing Ns4Kafka access control entries
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns-connect1"))
-                .thenReturn(true);
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns-connect2"))
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns-connect1"))
+            .thenReturn(true);
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns-connect2"))
+            .thenReturn(true);
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns1-connect1"))
            .thenReturn(true);
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns1-connect1"))
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns1-connect2"))
            .thenReturn(true);
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns1-connect2"))
-                .thenReturn(true);
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns2-connect1"))
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns2-connect1"))
            .thenReturn(false);
 
        when(accessControlEntryService.findAllGrantedToNamespace(ns))
-                .thenReturn(List.of(
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns-")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns1-connect1")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns1-connect2")
-                                        .build())
-                                .build()
-                ));
+            .thenReturn(List.of(
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns-")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns1-connect1")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns1-connect2")
+                        .build())
+                    .build()
+            ));
 
        StepVerifier.create(connectorService.listUnsynchronizedConnectors(ns))
            .verifyComplete();
    }
 
-    /**
-     * Test the listing of unsynchronized connectors when some are synchronized and some not
-     */
    @Test
    void listUnsynchronizedPartialExistingConnectors() {
        Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
        // init connectorAsyncExecutor
        ConnectorAsyncExecutor connectorAsyncExecutor = mock(ConnectorAsyncExecutor.class);
        when(applicationContext.getBean(ConnectorAsyncExecutor.class,
-                Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(connectorAsyncExecutor);
-
+            Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(connectorAsyncExecutor);
+
        // list of existing broker connectors
        Connector c1 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect1").build())
+            .build();
 
        Connector c2 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect2").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect2").build())
+            .build();
 
        Connector c3 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns1-connect1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns1-connect1").build())
+            .build();
 
        Connector c4 = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns2-connect1").build())
-                .build();
-
+            .metadata(ObjectMeta.builder().name("ns2-connect1").build())
+            .build();
+
        when(connectorAsyncExecutor.collectBrokerConnectors("local-name")).thenReturn(Flux.fromIterable(List.of(
-                c1, c2, c3, c4)));
-
+            c1, c2, c3, c4)));
+
        // list of existing broker connects
        when(connectorRepository.findAllForCluster("local"))
-                .thenReturn(List.of(c1, c2, c3, c4));
+            .thenReturn(List.of(c1, c2, c3, c4));
 
        // list of existing Ns4Kafka access control entries
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns-connect1"))
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns-connect1"))
            .thenReturn(true);
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns-connect2"))
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns-connect2"))
            .thenReturn(true);
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns1-connect1"))
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns1-connect1"))
            .thenReturn(true);
-        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT, "ns2-connect1"))
-                .thenReturn(false);
+        when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.CONNECT,
+            "ns2-connect1"))
+            .thenReturn(false);
 
        when(accessControlEntryService.findAllGrantedToNamespace(ns))
-                .thenReturn(List.of(
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns-")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
-                                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
-                                        .resource("ns1-connect1")
-                                        .build())
-                                .build()
-                ));
+            .thenReturn(List.of(
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns-")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
+                        .resourceType(AccessControlEntry.ResourceType.CONNECT)
+                        .resource("ns1-connect1")
+                        .build())
+                    .build()
+            ));
 
        when(connectorRepository.findAllForCluster("local"))
-                .thenReturn(List.of(c1));
+            .thenReturn(List.of(c1));
 
        StepVerifier.create(connectorService.listUnsynchronizedConnectors(ns))
            .consumeNextWith(connector -> assertEquals("ns-connect2", connector.getMetadata().getName()))
@@ -992,63 +988,58 @@ void listUnsynchronizedPartialExistingConnectors() {
            .verifyComplete();
    }
 
-    /**
-     * Tests to delete a connector
-     */
    @Test
    void deleteConnectorSuccess() {
        Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
        Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect1").build())
-                .spec(Connector.ConnectorSpec.builder().connectCluster("local-name").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-connect1").build())
+            .spec(Connector.ConnectorSpec.builder().connectCluster("local-name").build())
+            .build();
 
        when(kafkaConnectClient.delete(ns.getMetadata().getCluster(),
-                "local-name", "ns-connect1")).thenReturn(Mono.just(HttpResponse.ok()));
+            "local-name", "ns-connect1")).thenReturn(Mono.just(HttpResponse.ok()));
 
        doNothing().when(connectorRepository).delete(connector);
 
        StepVerifier.create(connectorService.delete(ns, connector))
-            .consumeNextWith(response -> assertEquals( HttpStatus.OK, response.getStatus()))
+            .consumeNextWith(response -> assertEquals(HttpStatus.OK, response.getStatus()))
            .verifyComplete();
 
        verify(kafkaConnectClient, times(1)).delete(ns.getMetadata().getCluster(),
-                "local-name", "ns-connect1");
+            "local-name", "ns-connect1");
 
        verify(connectorRepository, times(1)).delete(connector);
    }
 
-    /**
-     * Tests to delete a connector when the cluster is not responding
-     */
    @Test
    void deleteConnectorConnectClusterError() {
        Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();
 
        Connector connector = Connector.builder()
-                .metadata(ObjectMeta.builder().name("ns-connect1").build())
.spec(Connector.ConnectorSpec.builder().connectCluster("local-name").build()) + .build(); when(kafkaConnectClient.delete(ns.getMetadata().getCluster(), - "local-name", "ns-connect1")).thenReturn(Mono.error(new HttpClientResponseException("Error", HttpResponse.serverError()))); + "local-name", "ns-connect1")).thenReturn( + Mono.error(new HttpClientResponseException("Error", HttpResponse.serverError()))); StepVerifier.create(connectorService.delete(ns, connector)) .consumeErrorWith(response -> assertEquals(HttpClientResponseException.class, response.getClass())) diff --git a/src/test/java/com/michelin/ns4kafka/services/ConsumerGroupServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/ConsumerGroupServiceTest.java index 9878d1d2..8f9ffbb4 100644 --- a/src/test/java/com/michelin/ns4kafka/services/ConsumerGroupServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/ConsumerGroupServiceTest.java @@ -1,5 +1,11 @@ package com.michelin.ns4kafka.services; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.consumer.group.ConsumerGroupResetOffsets; @@ -8,6 +14,10 @@ import com.michelin.ns4kafka.services.executors.ConsumerGroupAsyncExecutor; import io.micronaut.context.ApplicationContext; import io.micronaut.inject.qualifiers.Qualifiers; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; import org.apache.kafka.common.TopicPartition; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -15,17 +25,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class ConsumerGroupServiceTest { @Mock @@ -34,336 +33,276 @@ class ConsumerGroupServiceTest { @InjectMocks ConsumerGroupService consumerGroupService; - /** - * Validate reset offsets when to earliest on all topics - */ @Test void doValidationAllTopics() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("*") - .method(ResetOffsetsMethod.TO_EARLIEST) - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("*") + .method(ResetOffsetsMethod.TO_EARLIEST) + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertTrue(result.isEmpty()); } - /** - * Validate reset offsets when to earliest on a given topics - */ @Test void doValidationAllPartitionsFromTopic() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01") - .method(ResetOffsetsMethod.TO_EARLIEST) - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01") + 
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .build())
-                .build();
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("namespace_testTopic01")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .build())
+            .build();
 
         List<String> result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets);
 
         assertTrue(result.isEmpty());
     }
 
-    /**
-     * Validate reset offsets when to earliest on a given topic-partition
-     */
     @Test
     void doValidationSpecificPartitionFromTopic() {
         ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder()
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("namespace_testTopic01:2")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .build())
-                .build();
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("namespace_testTopic01:2")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .build())
+            .build();
 
         List<String> result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets);
 
         assertTrue(result.isEmpty());
     }
 
-    /**
-     * Validate reset offsets fails when missing topic on topic-partition
-     */
     @Test
     void doValidationMissingTopic() {
         ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder()
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic(":2")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .build())
-                .build();
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic(":2")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .build())
+            .build();
 
         List<String> result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets);
 
         assertEquals(1, result.size());
     }
 
-    /**
-     * Validate reset offsets with earliest option
-     */
     @Test
     void doValidationEarliestOption() {
         ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder()
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("namespace_testTopic01:2")
-                        .method(ResetOffsetsMethod.TO_EARLIEST)
-                        .options(null)
-                        .build())
-                .build();
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("namespace_testTopic01:2")
+                .method(ResetOffsetsMethod.TO_EARLIEST)
+                .options(null)
+                .build())
+            .build();
 
         List<String> result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets);
 
         assertTrue(result.isEmpty());
     }
 
-    /**
-     * Validate reset offsets with latest option
-     */
     @Test
     void doValidationLatestOption() {
         ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder()
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("namespace_testTopic01:2")
-                        .method(ResetOffsetsMethod.TO_LATEST)
-                        .options(null)
-                        .build())
-                .build();
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("namespace_testTopic01:2")
+                .method(ResetOffsetsMethod.TO_LATEST)
+                .options(null)
+                .build())
+            .build();
 
         List<String> result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets);
 
         assertTrue(result.isEmpty());
     }
 
-    /**
-     * Validate reset offsets with to datetime option
-     */
     @Test
     void doValidationValidDateTimeOption() {
         ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder()
-                .spec(ConsumerGroupResetOffsetsSpec.builder()
-                        .topic("namespace_testTopic01:2")
-                        .method(ResetOffsetsMethod.TO_DATETIME)
-                        .options("2021-06-02T11:23:33.249+02:00")
-                        .build())
-                .build();
+            .spec(ConsumerGroupResetOffsetsSpec.builder()
+                .topic("namespace_testTopic01:2")
+                .method(ResetOffsetsMethod.TO_DATETIME)
+                .options("2021-06-02T11:23:33.249+02:00")
+                .build())
+            .build();
 
         List<String> result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets);
 
         assertTrue(result.isEmpty());
     }
 
-    /**
-     * Validate reset offsets with to datetime option without ms
-     */
     @Test
-    void doValidationValidDateTimeOptionDateWithoutMS() {
doValidationValidDateTimeOptionDateWithoutMs() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.TO_DATETIME) - .options("2021-06-02T11:22:33+02:00") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.TO_DATETIME) + .options("2021-06-02T11:22:33+02:00") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertTrue(result.isEmpty()); } - /** - * Validate reset offsets fails with invalid to datetime option - */ @Test void doValidationInvalidDateTimeOption() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.TO_DATETIME) - .options("NOT A DATE") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.TO_DATETIME) + .options("NOT A DATE") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertEquals(1, result.size()); } - /** - * Validate reset offsets fails with invalid to datetime option - */ @Test - void doValidationInvalidDateTimeOptionDateWithoutTZ() { + void doValidationInvalidDateTimeOptionDateWithoutTz() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.TO_DATETIME) - .options("2021-06-02T11:22:33.249") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.TO_DATETIME) + .options("2021-06-02T11:22:33.249") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertEquals(1, result.size()); } - /** - * Validate reset offsets fails with invalid to datetime option - */ @Test - void doValidationInvalidDateTimeOptionDateWithInvalidTZ() { + void doValidationInvalidDateTimeOptionDateWithInvalidTz() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.TO_DATETIME) - .options("2021-06-02T11:22:33+99:99") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.TO_DATETIME) + .options("2021-06-02T11:22:33+99:99") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertEquals(1, result.size()); } - /** - * Validate reset offsets with shift by option - */ @Test void doValidationValidMinusShiftByOption() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.SHIFT_BY) - .options("-5") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.SHIFT_BY) + .options("-5") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertTrue(result.isEmpty()); } - /** - * 
Validate reset offsets with shift by option - */ @Test void doValidationValidPositiveShiftByOption() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.SHIFT_BY) - .options("+5") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.SHIFT_BY) + .options("+5") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertTrue(result.isEmpty()); } - /** - * Validate reset offsets fails with shift by option - */ @Test void doValidation_InvalidShiftByOption() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.SHIFT_BY) - .options("Not an integer") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.SHIFT_BY) + .options("Not an integer") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertEquals(1, result.size()); } - /** - * Validate reset offsets with by duration option - */ @Test void doValidationValidPositiveDurationOption() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.BY_DURATION) - .options("P4DT11H9M8S") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.BY_DURATION) + .options("P4DT11H9M8S") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertTrue(result.isEmpty()); } - /** - * Validate reset offsets with by duration option - */ @Test void doValidationValidMinusDurationOption() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.BY_DURATION) - .options("-P4DT11H9M8S") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.BY_DURATION) + .options("-P4DT11H9M8S") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertTrue(result.isEmpty()); } - /** - * Validate reset offsets fails with by duration option - */ @Test void doValidation_InvalidDurationOption() { ConsumerGroupResetOffsets consumerGroupResetOffsets = ConsumerGroupResetOffsets.builder() - .spec(ConsumerGroupResetOffsetsSpec.builder() - .topic("namespace_testTopic01:2") - .method(ResetOffsetsMethod.BY_DURATION) - .options("P4T11H9M8S") - .build()) - .build(); + .spec(ConsumerGroupResetOffsetsSpec.builder() + .topic("namespace_testTopic01:2") + .method(ResetOffsetsMethod.BY_DURATION) + .options("P4T11H9M8S") + .build()) + .build(); List result = consumerGroupService.validateResetOffsets(consumerGroupResetOffsets); assertEquals(1, result.size()); } - /** - * Assert all partitions of all topics are retrieved when required - * @throws ExecutionException Any execution exception during consumer groups description - * @throws InterruptedException Any interrupted 
exception during consumer groups description - */ @Test void doGetPartitionsToResetAllTopic() throws InterruptedException, ExecutionException { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .cluster("test") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .cluster("test") + .build()) + .build(); String groupId = "testGroup"; String topic = "*"; TopicPartition topicPartition1 = new TopicPartition("topic1", 0); TopicPartition topicPartition2 = new TopicPartition("topic1", 1); TopicPartition topicPartition3 = new TopicPartition("topic2", 0); - List partitionsToReset = List.of(topicPartition1, topicPartition2, topicPartition3); ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = mock(ConsumerGroupAsyncExecutor.class); when(applicationContext.getBean(ConsumerGroupAsyncExecutor.class, - Qualifiers.byName(namespace.getMetadata().getCluster()))).thenReturn(consumerGroupAsyncExecutor); + Qualifiers.byName(namespace.getMetadata().getCluster()))).thenReturn(consumerGroupAsyncExecutor); when(consumerGroupAsyncExecutor.getCommittedOffsets(groupId)).thenReturn( - Map.of(topicPartition1, 5L, - topicPartition2, 10L, - topicPartition3, 5L)); + Map.of(topicPartition1, 5L, + topicPartition2, 10L, + topicPartition3, 5L)); List result = consumerGroupService.getPartitionsToReset(namespace, groupId, topic); assertEquals(3, result.size()); + + List partitionsToReset = List.of(topicPartition1, topicPartition2, topicPartition3); assertEquals(new HashSet<>(partitionsToReset), new HashSet<>(result)); } - /** - * Assert all partitions of given topic are retrieved when required - * @throws ExecutionException Any execution exception during consumer groups description - * @throws InterruptedException Any interrupted exception during consumer groups description - */ @Test void doGetPartitionsToResetOneTopic() throws InterruptedException, ExecutionException { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .cluster("test") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .cluster("test") + .build()) + .build(); String groupId = "testGroup"; String topic = "topic1"; @@ -372,27 +311,22 @@ void doGetPartitionsToResetOneTopic() throws InterruptedException, ExecutionExce ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = mock(ConsumerGroupAsyncExecutor.class); when(applicationContext.getBean(ConsumerGroupAsyncExecutor.class, - Qualifiers.byName(namespace.getMetadata().getCluster()))).thenReturn(consumerGroupAsyncExecutor); + Qualifiers.byName(namespace.getMetadata().getCluster()))).thenReturn(consumerGroupAsyncExecutor); when(consumerGroupAsyncExecutor.getTopicPartitions(topic)).thenReturn( - List.of(topicPartition1, topicPartition2)); + List.of(topicPartition1, topicPartition2)); List result = consumerGroupService.getPartitionsToReset(namespace, groupId, topic); assertEquals(2, result.size()); assertEquals(new HashSet<>(List.of(topicPartition1, topicPartition2)), new HashSet<>(result)); } - /** - * Assert given partition of given topic is retrieved when required - * @throws ExecutionException Any execution exception during consumer groups description - * @throws InterruptedException Any interrupted exception during consumer groups description - */ @Test void doGetPartitionsToResetOneTopicPartition() throws InterruptedException, ExecutionException { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .cluster("test") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .cluster("test") + .build()) + .build(); 
String groupId = "testGroup"; String topic = "topic1:0"; @@ -404,18 +338,13 @@ void doGetPartitionsToResetOneTopicPartition() throws InterruptedException, Exec assertEquals(List.of(topicPartition1), result); } - /** - * Assert prepare offsets with shift by option is working - * @throws ExecutionException Any execution exception during consumer groups description - * @throws InterruptedException Any interrupted exception during consumer groups description - */ @Test void doPrepareOffsetsToResetShiftBy() throws ExecutionException, InterruptedException { Namespace namespace = Namespace.builder() - .metadata(ObjectMeta.builder() - .cluster("test") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .cluster("test") + .build()) + .build(); String groupId = "testGroup"; String options = "-5"; TopicPartition topicPartition1 = new TopicPartition("topic1", 0); @@ -425,18 +354,19 @@ void doPrepareOffsetsToResetShiftBy() throws ExecutionException, InterruptedExce ResetOffsetsMethod method = ResetOffsetsMethod.SHIFT_BY; ConsumerGroupAsyncExecutor consumerGroupAsyncExecutor = mock(ConsumerGroupAsyncExecutor.class); when(applicationContext.getBean(ConsumerGroupAsyncExecutor.class, - Qualifiers.byName(namespace.getMetadata().getCluster()))).thenReturn(consumerGroupAsyncExecutor); + Qualifiers.byName(namespace.getMetadata().getCluster()))).thenReturn(consumerGroupAsyncExecutor); when(consumerGroupAsyncExecutor.getCommittedOffsets(anyString())).thenReturn( - Map.of(new TopicPartition("topic1", 0), 10L, - new TopicPartition("topic1", 1), 15L, - new TopicPartition("topic2", 0), 10L)); + Map.of(new TopicPartition("topic1", 0), 10L, + new TopicPartition("topic1", 1), 15L, + new TopicPartition("topic2", 0), 10L)); when(consumerGroupAsyncExecutor.checkOffsetsRange( - Map.of(new TopicPartition("topic1", 0), 5L, - new TopicPartition("topic1", 1), 10L))).thenReturn( - Map.of(new TopicPartition("topic1", 0), 5L, - new TopicPartition("topic1", 1), 10L)); + Map.of(new TopicPartition("topic1", 0), 5L, + new TopicPartition("topic1", 1), 10L))).thenReturn( + Map.of(new TopicPartition("topic1", 0), 5L, + new TopicPartition("topic1", 1), 10L)); - Map result = consumerGroupService.prepareOffsetsToReset(namespace, groupId, options, partitionsToReset, method); + Map result = + consumerGroupService.prepareOffsetsToReset(namespace, groupId, options, partitionsToReset, method); assertEquals(2, result.size()); assertTrue(result.containsKey(topicPartition1)); diff --git a/src/test/java/com/michelin/ns4kafka/services/NamespaceServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/NamespaceServiceTest.java index ec3eb39d..c0ee1377 100644 --- a/src/test/java/com/michelin/ns4kafka/services/NamespaceServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/NamespaceServiceTest.java @@ -1,25 +1,31 @@ package com.michelin.ns4kafka.services; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig.ConnectConfig; -import com.michelin.ns4kafka.models.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + +import com.michelin.ns4kafka.models.AccessControlEntry; +import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Namespace.NamespaceSpec; +import com.michelin.ns4kafka.models.ObjectMeta; +import com.michelin.ns4kafka.models.RoleBinding; +import com.michelin.ns4kafka.models.Topic; +import 
com.michelin.ns4kafka.models.connect.cluster.ConnectCluster; import com.michelin.ns4kafka.models.connector.Connector; +import com.michelin.ns4kafka.models.quota.ResourceQuota; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; +import com.michelin.ns4kafka.properties.ManagedClusterProperties.ConnectProperties; import com.michelin.ns4kafka.repositories.NamespaceRepository; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Stream; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Map; -import java.util.stream.Stream; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class NamespaceServiceTest { @Mock @@ -38,7 +44,13 @@ class NamespaceServiceTest { ConnectorService connectorService; @Mock - List kafkaAsyncExecutorConfigList; + ConnectClusterService connectClusterService; + + @Mock + ResourceQuotaService resourceQuotaService; + + @Mock + List managedClusterPropertiesList; @InjectMocks NamespaceService namespaceService; @@ -46,17 +58,17 @@ class NamespaceServiceTest { @Test void validationCreationNoClusterFail() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); when(namespaceRepository.findAllForCluster("local")) - .thenReturn(List.of()); + .thenReturn(List.of()); List result = namespaceService.validateCreation(ns); assertEquals(1, result.size()); @@ -68,32 +80,32 @@ void validationCreationNoClusterFail() { void validationCreationKafkaUserAlreadyExistFail() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); Namespace ns2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace2") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig1 = new KafkaAsyncExecutorConfig("local"); - - when(kafkaAsyncExecutorConfigList.stream()) - .thenReturn(Stream.of(kafkaAsyncExecutorConfig1)); + .metadata(ObjectMeta.builder() + .name("namespace2") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); + ManagedClusterProperties managedClusterProperties1 = new ManagedClusterProperties("local"); + + when(managedClusterPropertiesList.stream()) + .thenReturn(Stream.of(managedClusterProperties1)); when(namespaceRepository.findAllForCluster("local")) - .thenReturn(List.of(ns2)); + .thenReturn(List.of(ns2)); List result = 
namespaceService.validateCreation(ns); assertEquals(1, result.size()); @@ -105,56 +117,56 @@ void validationCreationKafkaUserAlreadyExistFail() { void validateCreationNoNamespaceSuccess() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); - - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig1 = new KafkaAsyncExecutorConfig("local"); - - when(kafkaAsyncExecutorConfigList.stream()) - .thenReturn(Stream.of(kafkaAsyncExecutorConfig1)); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); + + ManagedClusterProperties managedClusterProperties1 = new ManagedClusterProperties("local"); + + when(managedClusterPropertiesList.stream()) + .thenReturn(Stream.of(managedClusterProperties1)); when(namespaceRepository.findAllForCluster("local")) - .thenReturn(List.of()); + .thenReturn(List.of()); List result = namespaceService.validateCreation(ns); assertTrue(result.isEmpty()); } @Test - void validateCreationANamespaceAlreadyExistsSuccess() { + void validateCreationNamespaceAlreadyExistsSuccess() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); Namespace ns2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace2") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user2") - .build()) - .build(); - - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig1 = new KafkaAsyncExecutorConfig("local"); - - when(kafkaAsyncExecutorConfigList.stream()) - .thenReturn(Stream.of(kafkaAsyncExecutorConfig1)); + .metadata(ObjectMeta.builder() + .name("namespace2") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user2") + .build()) + .build(); + + ManagedClusterProperties managedClusterProperties1 = new ManagedClusterProperties("local"); + + when(managedClusterPropertiesList.stream()) + .thenReturn(Stream.of(managedClusterProperties1)); when(namespaceRepository.findAllForCluster("local")) - .thenReturn(List.of(ns2)); + .thenReturn(List.of(ns2)); List result = namespaceService.validateCreation(ns); assertTrue(result.isEmpty()); @@ -164,21 +176,21 @@ void validateCreationANamespaceAlreadyExistsSuccess() { void validateSuccess() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); - - KafkaAsyncExecutorConfig kafka = new KafkaAsyncExecutorConfig("local"); - kafka.setConnects(Map.of("local-name", new ConnectConfig())); - - when(kafkaAsyncExecutorConfigList.stream()) - .thenReturn(Stream.of(kafka)); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + 
.kafkaUser("user") + .build()) + .build(); + + ManagedClusterProperties kafka = new ManagedClusterProperties("local"); + kafka.setConnects(Map.of("local-name", new ConnectProperties())); + + when(managedClusterPropertiesList.stream()) + .thenReturn(Stream.of(kafka)); List result = namespaceService.validate(ns); @@ -190,21 +202,21 @@ void validateSuccess() { void validateFail() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); - - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig1 = new KafkaAsyncExecutorConfig("local"); - kafkaAsyncExecutorConfig1.setConnects(Map.of("other-connect-config", new ConnectConfig())); - - when(kafkaAsyncExecutorConfigList.stream()) - .thenReturn(Stream.of(kafkaAsyncExecutorConfig1)); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); + + ManagedClusterProperties managedClusterProperties1 = new ManagedClusterProperties("local"); + managedClusterProperties1.setConnects(Map.of("other-connect-config", new ConnectProperties())); + + when(managedClusterPropertiesList.stream()) + .thenReturn(Stream.of(managedClusterProperties1)); List result = namespaceService.validate(ns); @@ -216,46 +228,46 @@ void validateFail() { void listAll() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); Namespace ns2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace2") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user2") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace2") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user2") + .build()) + .build(); Namespace ns3 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace3") - .cluster("other-cluster") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user3") - .build()) - .build(); - - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig1 = new KafkaAsyncExecutorConfig("local"); - KafkaAsyncExecutorConfig kafkaAsyncExecutorConfig2 = new KafkaAsyncExecutorConfig("other-cluster"); - - when(kafkaAsyncExecutorConfigList.stream()) - - .thenReturn(Stream.of(kafkaAsyncExecutorConfig1, kafkaAsyncExecutorConfig2)); + .metadata(ObjectMeta.builder() + .name("namespace3") + .cluster("other-cluster") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user3") + .build()) + .build(); + + ManagedClusterProperties managedClusterProperties1 = new ManagedClusterProperties("local"); + ManagedClusterProperties managedClusterProperties2 = new ManagedClusterProperties("other-cluster"); + + when(managedClusterPropertiesList.stream()) + + .thenReturn(Stream.of(managedClusterProperties1, managedClusterProperties2)); 
when(namespaceRepository.findAllForCluster("local")) - .thenReturn(List.of(ns, ns2)); + .thenReturn(List.of(ns, ns2)); when(namespaceRepository.findAllForCluster("other-cluster")) - .thenReturn(List.of(ns3)); + .thenReturn(List.of(ns3)); List result = namespaceService.listAll(); @@ -266,25 +278,28 @@ void listAll() { @Test void listAllNamespaceResourcesEmpty() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(roleBindingService.list("namespace")) - .thenReturn(List.of()); + .thenReturn(List.of()); when(accessControlEntryService.findAllForNamespace(ns)) - .thenReturn(List.of()); - + .thenReturn(List.of()); + when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of()); + when(resourceQuotaService.findByNamespace("namespace")) + .thenReturn(Optional.empty()); List result = namespaceService.listAllNamespaceResources(ns); assertTrue(result.isEmpty()); @@ -293,141 +308,228 @@ void listAllNamespaceResourcesEmpty() { @Test void listAllNamespaceResourcesTopic() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); Topic topic = Topic.builder() .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) + .name("topic") + .namespace("namespace") + .build()) .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic)); + .thenReturn(List.of(topic)); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(roleBindingService.list("namespace")) - .thenReturn(List.of()); + .thenReturn(List.of()); when(accessControlEntryService.findAllForNamespace(ns)) - .thenReturn(List.of()); - + .thenReturn(List.of()); + when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of()); + when(resourceQuotaService.findByNamespace("namespace")) + .thenReturn(Optional.empty()); List result = namespaceService.listAllNamespaceResources(ns); - assertEquals(1,result.size()); - assertEquals("Topic/topic",result.get(0)); + assertEquals(1, result.size()); + assertEquals("Topic/topic", result.get(0)); } @Test void listAllNamespaceResourcesConnect() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); Connector connector = Connector.builder() 
.metadata(ObjectMeta.builder() - .name("connector") - .namespace("namespace") - .build()) + .name("connector") + .namespace("namespace") + .build()) .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of(connector)); + .thenReturn(List.of(connector)); when(roleBindingService.list("namespace")) - .thenReturn(List.of()); + .thenReturn(List.of()); when(accessControlEntryService.findAllForNamespace(ns)) - .thenReturn(List.of()); - + .thenReturn(List.of()); + when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of()); + when(resourceQuotaService.findByNamespace("namespace")) + .thenReturn(Optional.empty()); List result = namespaceService.listAllNamespaceResources(ns); - assertEquals(1,result.size()); - assertEquals("Connector/connector",result.get(0)); + assertEquals(1, result.size()); + assertEquals("Connector/connector", result.get(0)); } @Test void listAllNamespaceResourcesRoleBinding() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); RoleBinding rb = RoleBinding.builder() .metadata(ObjectMeta.builder() - .name("rolebinding") - .namespace("namespace") - .build()) + .name("rolebinding") + .namespace("namespace") + .build()) .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(roleBindingService.list("namespace")) - .thenReturn(List.of(rb)); + .thenReturn(List.of(rb)); when(accessControlEntryService.findAllForNamespace(ns)) - .thenReturn(List.of()); - + .thenReturn(List.of()); + when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of()); + when(resourceQuotaService.findByNamespace("namespace")) + .thenReturn(Optional.empty()); List result = namespaceService.listAllNamespaceResources(ns); - assertEquals(1,result.size()); - assertEquals("RoleBinding/rolebinding",result.get(0)); + assertEquals(1, result.size()); + assertEquals("RoleBinding/rolebinding", result.get(0)); } @Test void listAllNamespaceResourcesAccessControlEntry() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .kafkaUser("user") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); AccessControlEntry ace = AccessControlEntry.builder() .metadata(ObjectMeta.builder() - .name("ace") - .namespace("namespace") - .build()) + .name("ace") + .namespace("namespace") + .build()) .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(roleBindingService.list("namespace")) - .thenReturn(List.of()); + .thenReturn(List.of()); 
when(accessControlEntryService.findAllForNamespace(ns)) - .thenReturn(List.of(ace)); + .thenReturn(List.of(ace)); + when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of()); + when(resourceQuotaService.findByNamespace("namespace")) + .thenReturn(Optional.empty()); + List result = namespaceService.listAllNamespaceResources(ns); + assertEquals(1, result.size()); + assertEquals("AccessControlEntry/ace", result.get(0)); + } + + @Test + void listAllNamespaceResourcesConnectCluster() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); + + ConnectCluster connectCluster = ConnectCluster.builder() + .metadata(ObjectMeta.builder() + .name("connect-cluster") + .namespace("namespace") + .build()) + .build(); + + when(topicService.findAllForNamespace(ns)) + .thenReturn(List.of()); + when(connectorService.findAllForNamespace(ns)) + .thenReturn(List.of()); + when(roleBindingService.list("namespace")) + .thenReturn(List.of()); + when(accessControlEntryService.findAllForNamespace(ns)) + .thenReturn(List.of()); + when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of(connectCluster)); + when(resourceQuotaService.findByNamespace("namespace")) + .thenReturn(Optional.empty()); List result = namespaceService.listAllNamespaceResources(ns); - assertEquals(1,result.size()); - assertEquals("AccessControlEntry/ace",result.get(0)); + assertEquals(1, result.size()); + assertEquals("ConnectCluster/connect-cluster", result.get(0)); } + @Test + void listAllNamespaceResourcesQuota() { + Namespace ns = Namespace.builder() + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .kafkaUser("user") + .build()) + .build(); + + ResourceQuota resourceQuota = ResourceQuota.builder() + .metadata(ObjectMeta.builder() + .name("resource-quota") + .namespace("namespace") + .build()) + .build(); + + when(topicService.findAllForNamespace(ns)) + .thenReturn(List.of()); + when(connectorService.findAllForNamespace(ns)) + .thenReturn(List.of()); + when(roleBindingService.list("namespace")) + .thenReturn(List.of()); + when(accessControlEntryService.findAllForNamespace(ns)) + .thenReturn(List.of()); + when(connectClusterService.findAllByNamespaceOwner(ns)) + .thenReturn(List.of()); + when(resourceQuotaService.findByNamespace("namespace")) + .thenReturn(Optional.of(resourceQuota)); + + List result = namespaceService.listAllNamespaceResources(ns); + assertEquals(1, result.size()); + assertEquals("ResourceQuota/resource-quota", result.get(0)); + } } diff --git a/src/test/java/com/michelin/ns4kafka/services/ResourceQuotaServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/ResourceQuotaServiceTest.java index 18abde93..cd9630b8 100644 --- a/src/test/java/com/michelin/ns4kafka/services/ResourceQuotaServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/ResourceQuotaServiceTest.java @@ -1,5 +1,20 @@ package com.michelin.ns4kafka.services; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.COUNT_CONNECTORS; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.COUNT_PARTITIONS; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.COUNT_TOPICS; +import static 
com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.DISK_TOPICS; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.USER_CONSUMER_BYTE_RATE; +import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.USER_PRODUCER_BYTE_RATE; +import static org.apache.kafka.common.config.TopicConfig.RETENTION_BYTES_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; @@ -7,22 +22,15 @@ import com.michelin.ns4kafka.models.quota.ResourceQuota; import com.michelin.ns4kafka.models.quota.ResourceQuotaResponse; import com.michelin.ns4kafka.repositories.ResourceQuotaRepository; +import java.util.List; +import java.util.Map; +import java.util.Optional; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import static com.michelin.ns4kafka.models.quota.ResourceQuota.ResourceQuotaSpecKey.*; -import static org.apache.kafka.common.config.TopicConfig.RETENTION_BYTES_CONFIG; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.*; - @ExtendWith(MockitoExtension.class) class ResourceQuotaServiceTest { @InjectMocks @@ -37,178 +45,159 @@ class ResourceQuotaServiceTest { @Mock ConnectorService connectorService; - @Mock - NamespaceService namespaceService; - - /** - * Test get quota by namespace when it is defined - */ @Test void findByNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "1")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "1")) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.of(resourceQuota)); + .thenReturn(Optional.of(resourceQuota)); - Optional resourceQuotaOptional = resourceQuotaService.findByNamespace(ns.getMetadata().getName()); + Optional resourceQuotaOptional = + resourceQuotaService.findByNamespace(ns.getMetadata().getName()); assertTrue(resourceQuotaOptional.isPresent()); assertEquals("test", resourceQuotaOptional.get().getMetadata().getName()); } - /** - * Test get quota by namespace when it is empty - */ @Test void findByNamespaceEmpty() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - 
.connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); - Optional resourceQuotaOptional = resourceQuotaService.findByNamespace(ns.getMetadata().getName()); + Optional resourceQuotaOptional = + resourceQuotaService.findByNamespace(ns.getMetadata().getName()); assertTrue(resourceQuotaOptional.isEmpty()); } - /** - * Test get quota by name - */ @Test void findByName() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "1")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "1")) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.of(resourceQuota)); + .thenReturn(Optional.of(resourceQuota)); - Optional resourceQuotaOptional = resourceQuotaService.findByName(ns.getMetadata().getName(), "test"); + Optional resourceQuotaOptional = + resourceQuotaService.findByName(ns.getMetadata().getName(), "test"); assertTrue(resourceQuotaOptional.isPresent()); assertEquals("test", resourceQuotaOptional.get().getMetadata().getName()); } - /** - * Test get quota by wrong name - */ @Test void findByNameWrongName() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "1")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "1")) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.of(resourceQuota)); + .thenReturn(Optional.of(resourceQuota)); - Optional resourceQuotaOptional = resourceQuotaService.findByName(ns.getMetadata().getName(), "wrong-name"); + Optional resourceQuotaOptional = + resourceQuotaService.findByName(ns.getMetadata().getName(), "wrong-name"); assertTrue(resourceQuotaOptional.isEmpty()); } - /** - * Test get quota when there is no quota defined on the namespace - */ @Test void findByNameEmpty() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() 
+ .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); - Optional resourceQuotaOptional = resourceQuotaService.findByName(ns.getMetadata().getName(), "test"); + Optional resourceQuotaOptional = + resourceQuotaService.findByName(ns.getMetadata().getName(), "test"); assertTrue(resourceQuotaOptional.isEmpty()); } - /** - * Test create quota - */ @Test void create() { ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "1")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "1")) + .build(); when(resourceQuotaRepository.create(resourceQuota)) - .thenReturn(resourceQuota); + .thenReturn(resourceQuota); ResourceQuota createdResourceQuota = resourceQuotaService.create(resourceQuota); assertEquals(resourceQuota, createdResourceQuota); verify(resourceQuotaRepository, times(1)).create(resourceQuota); } - /** - * Test delete quota - */ @Test void delete() { ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "1")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "1")) + .build(); doNothing().when(resourceQuotaRepository).delete(resourceQuota); @@ -216,303 +205,284 @@ void delete() { verify(resourceQuotaRepository, times(1)).delete(resourceQuota); } - /** - * Test successful validation when creating quota - */ @Test void validateNewQuotaAgainstCurrentResourceSuccess() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "10")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "10")) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); List validationErrors = 
resourceQuotaService.validateNewResourceQuota(ns, resourceQuota); assertEquals(0, validationErrors.size()); } - /** - * Test validation when creating quota on count/topics - */ @Test void validateNewQuotaAgainstCurrentResourceForCountTopics() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "2")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "2")) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); List validationErrors = resourceQuotaService.validateNewResourceQuota(ns, resourceQuota); assertEquals(1, validationErrors.size()); assertEquals("Quota already exceeded for count/topics: 3/2 (used/limit)", validationErrors.get(0)); } - /** - * Test validation when creating quota on count/partitions - */ @Test void validateNewQuotaAgainstCurrentResourceForCountPartitions() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_PARTITIONS.toString(), "10")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_PARTITIONS.toString(), "10")) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .build()) - .build(); + 
.metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); List validationErrors = resourceQuotaService.validateNewResourceQuota(ns, resourceQuota); assertEquals(1, validationErrors.size()); assertEquals("Quota already exceeded for count/partitions: 19/10 (used/limit)", validationErrors.get(0)); } - /** - * Test format when creating quota on disk/topics - */ @Test void validateNewQuotaDiskTopicsFormat() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(DISK_TOPICS.toString(), "10")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(DISK_TOPICS.toString(), "10")) + .build(); List validationErrors = resourceQuotaService.validateNewResourceQuota(ns, resourceQuota); assertEquals(1, validationErrors.size()); - assertEquals("Invalid value for disk/topics: value must end with either B, KiB, MiB or GiB", validationErrors.get(0)); + assertEquals("Invalid value for disk/topics: value must end with either B, KiB, MiB or GiB", + validationErrors.get(0)); } - /** - * Test validation when creating quota on disk/topics - */ @Test void validateNewQuotaAgainstCurrentResourceForDiskTopics() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(DISK_TOPICS.toString(), "5000B")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(DISK_TOPICS.toString(), "5000B")) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of("retention.bytes", "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of("retention.bytes", "1000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - 
.name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .configs(Map.of("retention.bytes", "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .configs(Map.of("retention.bytes", "1000")) + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2)); + .thenReturn(List.of(topic1, topic2)); List validationErrors = resourceQuotaService.validateNewResourceQuota(ns, resourceQuota); assertEquals(1, validationErrors.size()); assertEquals("Quota already exceeded for disk/topics: 8.79KiB/5000B (used/limit)", validationErrors.get(0)); } - /** - * Test validation when creating quota on count/connectors - */ @Test void validateNewQuotaAgainstCurrentResourceForCountConnectors() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_CONNECTORS.toString(), "1")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_CONNECTORS.toString(), "1")) + .build(); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of( - Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), - Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); + .thenReturn(List.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), + Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); List validationErrors = resourceQuotaService.validateNewResourceQuota(ns, resourceQuota); assertEquals(1, validationErrors.size()); assertEquals("Quota already exceeded for count/connectors: 2/1 (used/limit)", validationErrors.get(0)); } - /** - * Test validation errors when creating quota on user/consumer_byte_rate with string instead of number - */ @Test void validateUserQuotaFormatError() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(USER_PRODUCER_BYTE_RATE.toString(), "producer", USER_CONSUMER_BYTE_RATE.toString(), "consumer")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec( + Map.of(USER_PRODUCER_BYTE_RATE.toString(), "producer", USER_CONSUMER_BYTE_RATE.toString(), "consumer")) + .build(); List validationErrors = resourceQuotaService.validateNewResourceQuota(ns, resourceQuota); @@ -521,198 +491,182 @@ void validateUserQuotaFormatError() { assertEquals("Number expected for user/consumer_byte_rate (consumer given)", validationErrors.get(1)); } - /** - * Test validation when creating quota on user/consumer_byte_rate - */ @Test void 
validateUserQuotaFormatSuccess() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(USER_PRODUCER_BYTE_RATE.toString(), "102400", USER_CONSUMER_BYTE_RATE.toString(), "102400")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(USER_PRODUCER_BYTE_RATE.toString(), "102400", USER_CONSUMER_BYTE_RATE.toString(), "102400")) + .build(); List validationErrors = resourceQuotaService.validateNewResourceQuota(ns, resourceQuota); assertEquals(0, validationErrors.size()); } - /** - * Test get current used resource for count topics - */ @Test void getCurrentUsedResourceForCountTopicsByNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); long currentlyUsed = resourceQuotaService.getCurrentCountTopicsByNamespace(ns); assertEquals(3L, currentlyUsed); } - /** - - * Test get current used resource for count partitions by namespace - */ @Test void getCurrentUsedResourceForCountPartitionsByNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + 
.build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); long currentlyUsed = resourceQuotaService.getCurrentCountPartitionsByNamespace(ns); assertEquals(19L, currentlyUsed); } - /** - * Test get current used resource for count connectors by namespace - */ @Test void getCurrentUsedResourceForCountConnectorsByNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of( - Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), - Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); + .thenReturn(List.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), + Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); long currentlyUsed = resourceQuotaService.getCurrentCountConnectorsByNamespace(ns); assertEquals(2L, currentlyUsed); } - /** - * Test get current used resource for disk topics by namespace - */ @Test void getCurrentUsedResourceForDiskTopicsByNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of("retention.bytes", "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of("retention.bytes", "1000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .configs(Map.of("retention.bytes", "50000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .configs(Map.of("retention.bytes", "50000")) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .configs(Map.of("retention.bytes", "2500")) - .build()) - .build(); + 
.metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .configs(Map.of("retention.bytes", "2500")) + .build()) + .build(); when(topicService.findAllForNamespace(ns)).thenReturn(List.of(topic1, topic2, topic3)); @@ -720,432 +674,416 @@ void getCurrentUsedResourceForDiskTopicsByNamespace() { assertEquals(181000L, currentlyUsed); } - /** - * Test quota validation on topics - */ @Test void validateTopicQuota() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "4")) - .spec(Map.of(COUNT_PARTITIONS.toString(), "25")) - .spec(Map.of(DISK_TOPICS.toString(), "2GiB")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "4")) + .spec(Map.of(COUNT_PARTITIONS.toString(), "25")) + .spec(Map.of(DISK_TOPICS.toString(), "2GiB")) + .build(); Topic newTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.of(resourceQuota)); + .thenReturn(Optional.of(resourceQuota)); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + 
.thenReturn(List.of(topic1, topic2, topic3)); List<String> validationErrors = resourceQuotaService.validateTopicQuota(ns, Optional.empty(), newTopic); assertEquals(0, validationErrors.size()); } - /** - * Test quota validation on topics when there is no quota defined - */ @Test void validateTopicQuotaNoQuota() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Topic newTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .build()) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); List<String> validationErrors = resourceQuotaService.validateTopicQuota(ns, Optional.empty(), newTopic); assertEquals(0, validationErrors.size()); } - /** - * Test quota validation on topics when quota is being exceeded - */ @Test void validateTopicQuotaExceed() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "3", COUNT_PARTITIONS.toString(), "20", DISK_TOPICS.toString(), "20KiB")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "3", COUNT_PARTITIONS.toString(), "20", DISK_TOPICS.toString(), + "20KiB")) + .build(); Topic newTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); +
.metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.of(resourceQuota)); + .thenReturn(Optional.of(resourceQuota)); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); List<String> validationErrors = resourceQuotaService.validateTopicQuota(ns, Optional.empty(), newTopic); assertEquals(3, validationErrors.size()); - assertEquals("Exceeding quota for count/topics: 3/3 (used/limit). Cannot add 1 topic.", validationErrors.get(0)); - assertEquals("Exceeding quota for count/partitions: 19/20 (used/limit). Cannot add 6 partition(s).", validationErrors.get(1)); - assertEquals("Exceeding quota for disk/topics: 18.555KiB/20.0KiB (used/limit). Cannot add 5.86KiB of data.", validationErrors.get(2)); + assertEquals("Exceeding quota for count/topics: 3/3 (used/limit). Cannot add 1 topic.", + validationErrors.get(0)); + assertEquals("Exceeding quota for count/partitions: 19/20 (used/limit). Cannot add 6 partition(s).", + validationErrors.get(1)); + assertEquals("Exceeding quota for disk/topics: 18.555KiB/20.0KiB (used/limit).
Cannot add 5.86KiB of data.", + validationErrors.get(2)); } - /** - * Test quota validation on topic update when quota is being exceeded - */ @Test void validateUpdateTopicQuotaExceed() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "3", COUNT_PARTITIONS.toString(), "20", DISK_TOPICS.toString(), "20KiB")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "3", COUNT_PARTITIONS.toString(), "20", DISK_TOPICS.toString(), + "20KiB")) + .build(); Topic newTopic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1500")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1500")) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .configs(Map.of(RETENTION_BYTES_CONFIG, "1000")) + .build()) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.of(resourceQuota)); + .thenReturn(Optional.of(resourceQuota)); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); List validationErrors = resourceQuotaService.validateTopicQuota(ns, Optional.of(topic1), newTopic); assertEquals(1, validationErrors.size()); - assertEquals("Exceeding quota for disk/topics: 18.555KiB/20.0KiB (used/limit). 
Cannot add 2.93KiB of data.", validationErrors.get(0)); + assertEquals("Exceeding quota for disk/topics: 18.555KiB/20.0KiB (used/limit). Cannot add 2.93KiB of data.", + validationErrors.get(0)); } - /** - * Test quota validation on connectors - */ @Test void validateConnectorQuota() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_CONNECTORS.toString(), "3")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_CONNECTORS.toString(), "3")) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.of(resourceQuota)); + .thenReturn(Optional.of(resourceQuota)); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of( - Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), - Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); + .thenReturn(List.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), + Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); List validationErrors = resourceQuotaService.validateConnectorQuota(ns); assertEquals(0, validationErrors.size()); } - /** - * Test quota validation on connectors when there is no quota defined - */ @Test void validateConnectorQuotaNoQuota() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.empty()); + .thenReturn(Optional.empty()); List validationErrors = resourceQuotaService.validateConnectorQuota(ns); assertEquals(0, validationErrors.size()); } - /** - * Test quota validation on connectors when quota is being exceeded - */ @Test void validateConnectorQuotaExceed() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_CONNECTORS.toString(), "2")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_CONNECTORS.toString(), "2")) + .build(); when(resourceQuotaRepository.findForNamespace("namespace")) - .thenReturn(Optional.of(resourceQuota)); + 
.thenReturn(Optional.of(resourceQuota)); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of( - Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), - Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); + .thenReturn(List.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), + Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); List<String> validationErrors = resourceQuotaService.validateConnectorQuota(ns); assertEquals(1, validationErrors.size()); - assertEquals("Exceeding quota for count/connectors: 2/2 (used/limit). Cannot add 1 connector.", validationErrors.get(0)); + assertEquals("Exceeding quota for count/connectors: 2/2 (used/limit). Cannot add 1 connector.", + validationErrors.get(0)); } - /** - * Validate get current used resources by quota for a namespace - */ @Test void getUsedResourcesByQuotaByNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "3", - COUNT_PARTITIONS.toString(), "20", - COUNT_CONNECTORS.toString(), "2", - DISK_TOPICS.toString(), "60KiB")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "3", + COUNT_PARTITIONS.toString(), "20", + COUNT_CONNECTORS.toString(), "2", + DISK_TOPICS.toString(), "60KiB")) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of("retention.bytes", "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of("retention.bytes", "1000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .configs(Map.of("retention.bytes", "2000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .configs(Map.of("retention.bytes", "2000")) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .configs(Map.of("retention.bytes", "4000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .configs(Map.of("retention.bytes", "4000")) + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of( -
Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), - Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); + .thenReturn(List.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), + Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); - ResourceQuotaResponse response = resourceQuotaService.getUsedResourcesByQuotaByNamespace(ns, Optional.of(resourceQuota)); + ResourceQuotaResponse response = + resourceQuotaService.getUsedResourcesByQuotaByNamespace(ns, Optional.of(resourceQuota)); assertEquals(resourceQuota.getMetadata(), response.getMetadata()); assertEquals("3/3", response.getSpec().getCountTopic()); assertEquals("19/20", response.getSpec().getCountPartition()); @@ -1153,60 +1091,57 @@ void getUsedResourcesByQuotaByNamespace() { assertEquals("50.782KiB/60KiB", response.getSpec().getDiskTopic()); } - /** - * Validate get current used resources by quota for a namespace when there is no quota applied - */ @Test void getCurrentResourcesQuotasByNamespaceNoQuota() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of("retention.bytes", "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of("retention.bytes", "1000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .configs(Map.of("retention.bytes", "2000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + .configs(Map.of("retention.bytes", "2000")) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .configs(Map.of("retention.bytes", "4000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .configs(Map.of("retention.bytes", "4000")) + .build()) + .build(); when(topicService.findAllForNamespace(ns)) - .thenReturn(List.of(topic1, topic2, topic3)); + .thenReturn(List.of(topic1, topic2, topic3)); when(connectorService.findAllForNamespace(ns)) - .thenReturn(List.of( - Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), - Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); + .thenReturn(List.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), + Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); ResourceQuotaResponse response = 
resourceQuotaService.getUsedResourcesByQuotaByNamespace(ns, Optional.empty()); assertEquals("namespace", response.getMetadata().getNamespace()); @@ -1217,113 +1152,109 @@ void getCurrentResourcesQuotasByNamespaceNoQuota() { assertEquals("50.782KiB", response.getSpec().getDiskTopic()); } - /** - * Validate get current used resources by quota for all namespaces - */ @Test - void getCurrentResourcesQuotasAllNamespaces() { + void getUsedQuotaByNamespaces() { Namespace ns1 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Namespace ns2 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace2") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace2") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Namespace ns3 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace3") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace3") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); Namespace ns4 = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace4") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace4") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); ResourceQuota resourceQuota = ResourceQuota.builder() - .metadata(ObjectMeta.builder() - .cluster("local") - .name("test") - .build()) - .spec(Map.of(COUNT_TOPICS.toString(), "3", - COUNT_PARTITIONS.toString(), "20", - COUNT_CONNECTORS.toString(), "2", - DISK_TOPICS.toString(), "60KiB")) - .build(); + .metadata(ObjectMeta.builder() + .cluster("local") + .name("test") + .build()) + .spec(Map.of(COUNT_TOPICS.toString(), "3", + COUNT_PARTITIONS.toString(), "20", + COUNT_CONNECTORS.toString(), "2", + DISK_TOPICS.toString(), "60KiB")) + .build(); Topic topic1 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(6) - .configs(Map.of("retention.bytes", "1000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(6) + .configs(Map.of("retention.bytes", "1000")) + .build()) + .build(); Topic topic2 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace2") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(3) - .configs(Map.of("retention.bytes", "2000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace2") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(3) + 
.configs(Map.of("retention.bytes", "2000")) + .build()) + .build(); Topic topic3 = Topic.builder() - .metadata(ObjectMeta.builder() - .name("topic") - .namespace("namespace3") - .build()) - .spec(Topic.TopicSpec.builder() - .partitions(10) - .configs(Map.of("retention.bytes", "4000")) - .build()) - .build(); - - when(namespaceService.listAll()) - .thenReturn(List.of(ns1, ns2, ns3, ns4)); + .metadata(ObjectMeta.builder() + .name("topic") + .namespace("namespace3") + .build()) + .spec(Topic.TopicSpec.builder() + .partitions(10) + .configs(Map.of("retention.bytes", "4000")) + .build()) + .build(); + when(topicService.findAllForNamespace(ns1)) - .thenReturn(List.of(topic1)); + .thenReturn(List.of(topic1)); when(topicService.findAllForNamespace(ns2)) - .thenReturn(List.of(topic2)); + .thenReturn(List.of(topic2)); when(topicService.findAllForNamespace(ns3)) - .thenReturn(List.of(topic3)); + .thenReturn(List.of(topic3)); when(topicService.findAllForNamespace(ns4)) - .thenReturn(List.of()); + .thenReturn(List.of()); when(connectorService.findAllForNamespace(any())) - .thenReturn(List.of( - Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), - Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); + .thenReturn(List.of( + Connector.builder().metadata(ObjectMeta.builder().name("connect1").build()).build(), + Connector.builder().metadata(ObjectMeta.builder().name("connect2").build()).build())); when(resourceQuotaRepository.findForNamespace(any())) - .thenReturn(Optional.of(resourceQuota)); + .thenReturn(Optional.of(resourceQuota)); - List response = resourceQuotaService.getUsedResourcesByQuotaForAllNamespaces(); + List response = + resourceQuotaService.getUsedQuotaByNamespaces(List.of(ns1, ns2, ns3, ns4)); assertEquals(4, response.size()); } } diff --git a/src/test/java/com/michelin/ns4kafka/services/RoleBindingServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/RoleBindingServiceTest.java index 1d8f5fc5..bd318e04 100644 --- a/src/test/java/com/michelin/ns4kafka/services/RoleBindingServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/RoleBindingServiceTest.java @@ -1,19 +1,18 @@ package com.michelin.ns4kafka.services; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.RoleBinding; import com.michelin.ns4kafka.repositories.RoleBindingRepository; +import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class RoleBindingServiceTest { @Mock @@ -25,25 +24,25 @@ class RoleBindingServiceTest { @Test void findByName() { RoleBinding rb1 = RoleBinding.builder() - .metadata(ObjectMeta.builder() - .name("namespace-rb1") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace-rb1") + .cluster("local") + .build()) + .build(); RoleBinding rb2 = RoleBinding.builder() - .metadata(ObjectMeta.builder() - .name("namespace-rb2") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace-rb2") + .cluster("local") + .build()) + .build(); RoleBinding rb3 = RoleBinding.builder() - 
.metadata(ObjectMeta.builder() - .name("namespace-rb3") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace-rb3") + .cluster("local") + .build()) + .build(); - when(roleBindingRepository.findAllForNamespace("namespace")).thenReturn(List.of(rb1,rb2,rb3)); + when(roleBindingRepository.findAllForNamespace("namespace")).thenReturn(List.of(rb1, rb2, rb3)); var result = roleBindingService.findByName("namespace", "namespace-rb2"); assertEquals(rb2, result.get()); diff --git a/src/test/java/com/michelin/ns4kafka/services/SchemaServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/SchemaServiceTest.java index 06ec69a9..4a900533 100644 --- a/src/test/java/com/michelin/ns4kafka/services/SchemaServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/SchemaServiceTest.java @@ -1,5 +1,12 @@ package com.michelin.ns4kafka.services; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; @@ -8,6 +15,8 @@ import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityCheckResponse; import com.michelin.ns4kafka.services.clients.schema.entities.SchemaCompatibilityResponse; import com.michelin.ns4kafka.services.clients.schema.entities.SchemaResponse; +import java.util.Arrays; +import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; @@ -17,14 +26,6 @@ import reactor.core.publisher.Mono; import reactor.test.StepVerifier; -import java.util.Arrays; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; - @ExtendWith(MockitoExtension.class) class SchemaServiceTest { @InjectMocks @@ -36,54 +37,53 @@ class SchemaServiceTest { @Mock SchemaRegistryClient schemaRegistryClient; - /** - * Test to find all schemas by namespace - */ @Test void getAllByNamespace() { Namespace namespace = buildNamespace(); - List<String> subjectsResponse = Arrays.asList("prefix.schema-one", "prefix2.schema-two", "prefix2.schema-three"); + List<String> subjectsResponse = + Arrays.asList("prefix.schema-one", "prefix2.schema-two", "prefix2.schema-three"); - when(schemaRegistryClient.getSubjects(namespace.getMetadata().getCluster())).thenReturn(Flux.fromIterable(subjectsResponse)); + when(schemaRegistryClient.getSubjects(namespace.getMetadata().getCluster())).thenReturn( + Flux.fromIterable(subjectsResponse)); when(accessControlEntryService.findAllGrantedToNamespace(namespace)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("prefix.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") -
.resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("prefix2.schema-two") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.READ) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("prefix3.") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resource("ns-") - .build()) - .build() - )); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("prefix.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("prefix2.schema-two") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.READ) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("prefix3.") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resource("ns-") + .build()) + .build() + )); StepVerifier.create(schemaService.findAllForNamespace(namespace)) .consumeNextWith(schema -> assertEquals("prefix.schema-one", schema.getMetadata().getName())) @@ -91,9 +91,6 @@ void getAllByNamespace() { .verifyComplete(); } - /** - * Test to find all schemas by namespace and the response from the schema registry is empty - */ @Test void getAllByNamespaceEmptyResponse() { Namespace namespace = buildNamespace(); @@ -101,19 +98,18 @@ void getAllByNamespaceEmptyResponse() { when(schemaRegistryClient.getSubjects(namespace.getMetadata().getCluster())).thenReturn(Flux.empty()); StepVerifier.create(schemaService.findAllForNamespace(namespace)) - .verifyComplete(); + .verifyComplete(); } - /** - * Test to find all schemas by namespace - */ @Test void getBySubjectAndVersion() { Namespace namespace = buildNamespace(); SchemaCompatibilityResponse compatibilityResponse = buildCompatibilityResponse(); - when(schemaRegistryClient.getLatestSubject(namespace.getMetadata().getCluster(), "prefix.schema-one")).thenReturn(Mono.just(buildSchemaResponse("prefix.schema-one"))); - when(schemaRegistryClient.getCurrentCompatibilityBySubject(any(), any())).thenReturn(Mono.just(compatibilityResponse)); + when(schemaRegistryClient.getLatestSubject(namespace.getMetadata().getCluster(), + 
"prefix.schema-one")).thenReturn(Mono.just(buildSchemaResponse("prefix.schema-one"))); + when(schemaRegistryClient.getCurrentCompatibilityBySubject(any(), any())).thenReturn( + Mono.just(compatibilityResponse)); StepVerifier.create(schemaService.getLatestSubject(namespace, "prefix.schema-one")) .consumeNextWith(latestSubject -> { @@ -124,97 +120,81 @@ void getBySubjectAndVersion() { .verifyComplete(); } - /** - * Test to find all schemas by namespace - */ @Test void getBySubjectAndVersionEmptyResponse() { Namespace namespace = buildNamespace(); when(schemaRegistryClient.getLatestSubject(namespace.getMetadata().getCluster(), "prefix.schema-one")) - .thenReturn(Mono.empty()); + .thenReturn(Mono.empty()); StepVerifier.create(schemaService.getLatestSubject(namespace, "prefix.schema-one")) - .verifyComplete(); + .verifyComplete(); } - - /** - * Test to register a new schema to the schema registry - */ @Test void register() { Namespace namespace = buildNamespace(); Schema schema = buildSchema(); when(schemaRegistryClient.register(any(), any(), any())) - .thenReturn(Mono.just(SchemaResponse.builder().id(1).version(1).build())); + .thenReturn(Mono.just(SchemaResponse.builder().id(1).version(1).build())); StepVerifier.create(schemaService.register(namespace, schema)) - .consumeNextWith(id -> assertEquals(1, id)) - .verifyComplete(); + .consumeNextWith(id -> assertEquals(1, id)) + .verifyComplete(); } - /** - * Test to delete a subject - */ @Test void deleteSubject() { Namespace namespace = buildNamespace(); when(schemaRegistryClient.deleteSubject(namespace.getMetadata().getCluster(), - "prefix.schema-one", false)).thenReturn(Mono.just(new Integer[]{1})); + "prefix.schema-one", false)).thenReturn(Mono.just(new Integer[] {1})); when(schemaRegistryClient.deleteSubject(namespace.getMetadata().getCluster(), - "prefix.schema-one", true)).thenReturn(Mono.just(new Integer[]{1})); + "prefix.schema-one", true)).thenReturn(Mono.just(new Integer[] {1})); StepVerifier.create(schemaService.deleteSubject(namespace, "prefix.schema-one")) - .consumeNextWith(ids -> { - assertEquals(1, ids.length); - assertEquals(1, ids[0]); - }) - .verifyComplete(); + .consumeNextWith(ids -> { + assertEquals(1, ids.length); + assertEquals(1, ids[0]); + }) + .verifyComplete(); verify(schemaRegistryClient, times(1)).deleteSubject(namespace.getMetadata().getCluster(), - "prefix.schema-one", false); + "prefix.schema-one", false); verify(schemaRegistryClient, times(1)).deleteSubject(namespace.getMetadata().getCluster(), - "prefix.schema-one", true); + "prefix.schema-one", true); } - /** - * Test the schema compatibility validation - */ @Test void validateSchemaCompatibility() { Namespace namespace = buildNamespace(); Schema schema = buildSchema(); SchemaCompatibilityCheckResponse schemaCompatibilityCheckResponse = SchemaCompatibilityCheckResponse.builder() - .isCompatible(true) - .build(); + .isCompatible(true) + .build(); when(schemaRegistryClient.validateSchemaCompatibility(any(), any(), any())) - .thenReturn(Mono.just(schemaCompatibilityCheckResponse)); + .thenReturn(Mono.just(schemaCompatibilityCheckResponse)); StepVerifier.create(schemaService.validateSchemaCompatibility(namespace.getMetadata().getCluster(), schema)) - .consumeNextWith(errors -> assertTrue(errors.isEmpty())) - .verifyComplete(); + .consumeNextWith(errors -> assertTrue(errors.isEmpty())) + .verifyComplete(); } - /** - * Test the schema compatibility invalidation - */ @Test void invalidateSchemaCompatibility() { Namespace namespace = buildNamespace(); Schema schema = 
buildSchema(); SchemaCompatibilityCheckResponse schemaCompatibilityCheckResponse = SchemaCompatibilityCheckResponse.builder() - .isCompatible(false) - .messages(List.of("Incompatible schema")) - .build(); + .isCompatible(false) + .messages(List.of("Incompatible schema")) + .build(); when(schemaRegistryClient.validateSchemaCompatibility(any(), any(), any())) - .thenReturn(Mono.just(schemaCompatibilityCheckResponse)); + .thenReturn(Mono.just(schemaCompatibilityCheckResponse)); StepVerifier.create(schemaService.validateSchemaCompatibility(namespace.getMetadata().getCluster(), schema)) .consumeNextWith(errors -> { @@ -224,129 +204,116 @@ void invalidateSchemaCompatibility() { .verifyComplete(); } - /** - * Test the schema compatibility validation when the Schema Registry returns 404 not found - */ @Test void validateSchemaCompatibility404NotFound() { Namespace namespace = buildNamespace(); Schema schema = buildSchema(); when(schemaRegistryClient.validateSchemaCompatibility(any(), any(), any())) - .thenReturn(Mono.empty()); + .thenReturn(Mono.empty()); StepVerifier.create(schemaService.validateSchemaCompatibility(namespace.getMetadata().getCluster(), schema)) - .consumeNextWith(errors -> assertTrue(errors.isEmpty())) - .verifyComplete(); + .consumeNextWith(errors -> assertTrue(errors.isEmpty())) + .verifyComplete(); } - /** - * Test the schema compatibility update when reset to default is asked - */ @Test void updateSubjectCompatibilityResetToDefault() { Namespace namespace = buildNamespace(); Schema schema = buildSchema(); when(schemaRegistryClient.deleteCurrentCompatibilityBySubject(any(), any())) - .thenReturn(Mono.just(SchemaCompatibilityResponse.builder() - .compatibilityLevel(Schema.Compatibility.FORWARD) - .build())); + .thenReturn(Mono.just(SchemaCompatibilityResponse.builder() + .compatibilityLevel(Schema.Compatibility.FORWARD) + .build())); StepVerifier.create(schemaService.updateSubjectCompatibility(namespace, schema, Schema.Compatibility.GLOBAL)) - .consumeNextWith(schemaCompatibilityResponse -> assertEquals(Schema.Compatibility.FORWARD, schemaCompatibilityResponse.compatibilityLevel())) + .consumeNextWith(schemaCompatibilityResponse -> assertEquals(Schema.Compatibility.FORWARD, + schemaCompatibilityResponse.compatibilityLevel())) .verifyComplete(); verify(schemaRegistryClient, times(1)).deleteCurrentCompatibilityBySubject(any(), any()); } - /** - * Test the schema compatibility validation - */ @Test void updateSubjectCompatibility() { Namespace namespace = buildNamespace(); Schema schema = buildSchema(); when(schemaRegistryClient.updateSubjectCompatibility(any(), any(), any())) - .thenReturn(Mono.just(SchemaCompatibilityResponse.builder() - .compatibilityLevel(Schema.Compatibility.FORWARD) - .build())); + .thenReturn(Mono.just(SchemaCompatibilityResponse.builder() + .compatibilityLevel(Schema.Compatibility.FORWARD) + .build())); StepVerifier.create(schemaService.updateSubjectCompatibility(namespace, schema, Schema.Compatibility.FORWARD)) - .consumeNextWith(schemaCompatibilityResponse -> assertEquals(Schema.Compatibility.FORWARD, schemaCompatibilityResponse.compatibilityLevel())) + .consumeNextWith(schemaCompatibilityResponse -> assertEquals(Schema.Compatibility.FORWARD, + schemaCompatibilityResponse.compatibilityLevel())) .verifyComplete(); verify(schemaRegistryClient, times(1)).updateSubjectCompatibility(any(), any(), any()); } - /** - * Test subjects belong to a namespace - * Assert the "-key"/"-value" suffixes are not taken in account when comparing subjects against the topics 
ACLs - */ @Test void isNamespaceOwnerOfSubjectTest() { Namespace ns = buildNamespace(); - when(accessControlEntryService.isNamespaceOwnerOfResource("myNamespace", AccessControlEntry.ResourceType.TOPIC, "prefix.schema-one")) - .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("myNamespace", AccessControlEntry.ResourceType.TOPIC, + "prefix.schema-one")) + .thenReturn(true); assertTrue(schemaService.isNamespaceOwnerOfSubject(ns, "prefix.schema-one-key")); assertTrue(schemaService.isNamespaceOwnerOfSubject(ns, "prefix.schema-one-value")); assertTrue(schemaService.isNamespaceOwnerOfSubject(ns, "prefix.schema-one")); } - /** - * Build a namespace resource - * @return The namespace - */ private Namespace buildNamespace() { return Namespace.builder() - .metadata(ObjectMeta.builder() - .name("myNamespace") - .cluster("local") - .build()) - .spec(Namespace.NamespaceSpec.builder() - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("myNamespace") + .cluster("local") + .build()) + .spec(Namespace.NamespaceSpec.builder() + .build()) + .build(); } - /** - * Build a schema resource - * @return The schema - */ private Schema buildSchema() { return Schema.builder() - .metadata(ObjectMeta.builder() - .name("prefix.schema-one") - .build()) - .spec(Schema.SchemaSpec.builder() - .compatibility(Schema.Compatibility.BACKWARD) - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("prefix.schema-one") + .build()) + .spec(Schema.SchemaSpec.builder() + .compatibility(Schema.Compatibility.BACKWARD) + .schema( + "{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\"," + + "\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"First name of the person\"}," + + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null," + + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\"," + + "{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null," + + "\"doc\":\"Date of birth of the person\"}]}") + .build()) + .build(); } - /** - * Build a schema response - * @param subject The subject to set to the schema - * @return The schema response - */ private SchemaResponse buildSchemaResponse(String subject) { return SchemaResponse.builder() - .id(1) - .version(1) - .subject(subject) - .schema("{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\",\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"First name of the person\"},{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null,\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null,\"doc\":\"Date of birth of the person\"}]}") - .build(); + .id(1) + .version(1) + .subject(subject) + .schema( + 
"{\"namespace\":\"com.michelin.kafka.producer.showcase.avro\",\"type\":\"record\"," + + "\"name\":\"PersonAvro\",\"fields\":[{\"name\":\"firstName\",\"type\":[\"null\",\"string\"]," + + "\"default\":null,\"doc\":\"First name of the person\"}," + + "{\"name\":\"lastName\",\"type\":[\"null\",\"string\"],\"default\":null," + + "\"doc\":\"Last name of the person\"},{\"name\":\"dateOfBirth\",\"type\":[\"null\"," + + "{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null," + + "\"doc\":\"Date of birth of the person\"}]}") + .build(); } - /** - * Build a schema compatibility response - * @return The compatibility response - */ private SchemaCompatibilityResponse buildCompatibilityResponse() { return SchemaCompatibilityResponse.builder() - .compatibilityLevel(Schema.Compatibility.BACKWARD) - .build(); + .compatibilityLevel(Schema.Compatibility.BACKWARD) + .build(); } } diff --git a/src/test/java/com/michelin/ns4kafka/services/StreamServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/StreamServiceTest.java index 390a1493..2fefc2a2 100644 --- a/src/test/java/com/michelin/ns4kafka/services/StreamServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/StreamServiceTest.java @@ -1,10 +1,15 @@ package com.michelin.ns4kafka.services; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.KafkaStream; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.repositories.StreamRepository; +import java.util.List; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -12,12 +17,6 @@ import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class StreamServiceTest { @InjectMocks @@ -32,14 +31,14 @@ class StreamServiceTest { @Test void findAllEmpty() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(streamRepository.findAllForCluster("local")) - .thenReturn(List.of()); + .thenReturn(List.of()); var actual = streamService.findAllForNamespace(ns); assertTrue(actual.isEmpty()); } @@ -47,38 +46,38 @@ void findAllEmpty() { @Test void findAll() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .namespace("test") - .cluster("local") - .build()) + .name("test_stream1") + .namespace("test") + .cluster("local") + .build()) .build(); KafkaStream stream2 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream2") - .namespace("test") - .cluster("local") - .build()) + .name("test_stream2") + .namespace("test") + .cluster("local") + .build()) .build(); KafkaStream stream3 = KafkaStream.builder() .metadata(ObjectMeta.builder() - 
.name("test_stream3") - .namespace("test") - .cluster("local") - .build()) + .name("test_stream3") + .namespace("test") + .cluster("local") + .build()) .build(); when(streamRepository.findAllForCluster("local")) - .thenReturn(List.of(stream1, stream2, stream3)); + .thenReturn(List.of(stream1, stream2, stream3)); var actual = streamService.findAllForNamespace(ns); - assertEquals(3,actual.size()); + assertEquals(3, actual.size()); assertTrue(actual.contains(stream1)); assertTrue(actual.contains(stream2)); assertTrue(actual.contains(stream3)); @@ -86,38 +85,37 @@ void findAll() { @Test void findByName() { - Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); KafkaStream stream1 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream1") - .namespace("test") - .cluster("local") - .build()) + .name("test_stream1") + .namespace("test") + .cluster("local") + .build()) .build(); KafkaStream stream2 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream2") - .namespace("test") - .cluster("local") - .build()) + .name("test_stream2") + .namespace("test") + .cluster("local") + .build()) .build(); KafkaStream stream3 = KafkaStream.builder() .metadata(ObjectMeta.builder() - .name("test_stream3") - .namespace("test") - .cluster("local") - .build()) + .name("test_stream3") + .namespace("test") + .cluster("local") + .build()) .build(); when(streamRepository.findAllForCluster("local")) - .thenReturn(List.of(stream1, stream2, stream3)); + .thenReturn(List.of(stream1, stream2, stream3)); var actual = streamService.findByName(ns, "test_stream2"); assertTrue(actual.isPresent()); assertEquals(stream2, actual.get()); @@ -127,14 +125,14 @@ void findByName() { void findByNameEmpty() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); when(streamRepository.findAllForCluster("local")) - .thenReturn(List.of()); + .thenReturn(List.of()); var actual = streamService.findByName(ns, "test_stream2"); assertTrue(actual.isEmpty()); } @@ -142,95 +140,95 @@ void findByNameEmpty() { @Test void isNamespaceOwnerOfKafkaStream() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("test") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test") + .cluster("local") + .build()) + .build(); AccessControlEntry ace1 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("test.") - .grantedTo("test") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("test.") + .grantedTo("test") + .build() + ) + .build(); AccessControlEntry ace2 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.GROUP) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - 
.permission(AccessControlEntry.Permission.OWNER) - .resource("test.") - .grantedTo("test") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.GROUP) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("test.") + .grantedTo("test") + .build() + ) + .build(); AccessControlEntry ace3 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.CONNECT) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("test.") - .grantedTo("test") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.CONNECT) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("test.") + .grantedTo("test") + .build() + ) + .build(); AccessControlEntry ace4 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("test-bis.") - .grantedTo("test") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("test-bis.") + .grantedTo("test") + .build() + ) + .build(); AccessControlEntry ace5 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.GROUP) - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .permission(AccessControlEntry.Permission.OWNER) - .resource("test-bis.") - .grantedTo("test") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.GROUP) + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .permission(AccessControlEntry.Permission.OWNER) + .resource("test-bis.") + .grantedTo("test") + .build() + ) + .build(); AccessControlEntry ace6 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.GROUP) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("test-ter.") - .grantedTo("test") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .resourceType(AccessControlEntry.ResourceType.GROUP) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("test-ter.") + .grantedTo("test") + .build() + ) + .build(); AccessControlEntry ace7 = AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .permission(AccessControlEntry.Permission.OWNER) - .resource("test-qua.") - .grantedTo("test") - .build() - ) - .build(); + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + 
.resourceType(AccessControlEntry.ResourceType.TOPIC) + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .permission(AccessControlEntry.Permission.OWNER) + .resource("test-qua.") + .grantedTo("test") + .build() + ) + .build(); when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of(ace1, ace2, ace3, ace4, ace5, ace6, ace7)); + .thenReturn(List.of(ace1, ace2, ace3, ace4, ace5, ace6, ace7)); assertTrue( - streamService.isNamespaceOwnerOfKafkaStream(ns, "test.stream")); + streamService.isNamespaceOwnerOfKafkaStream(ns, "test.stream")); Assertions.assertFalse( - streamService.isNamespaceOwnerOfKafkaStream(ns, "test-bis.stream"),"ACL are LITERAL"); + streamService.isNamespaceOwnerOfKafkaStream(ns, "test-bis.stream"), "ACL are LITERAL"); Assertions.assertFalse( - streamService.isNamespaceOwnerOfKafkaStream(ns, "test-ter.stream"), "Topic ACL missing"); + streamService.isNamespaceOwnerOfKafkaStream(ns, "test-ter.stream"), "Topic ACL missing"); Assertions.assertFalse( - streamService.isNamespaceOwnerOfKafkaStream(ns, "test-qua.stream"),"Group ACL missing"); + streamService.isNamespaceOwnerOfKafkaStream(ns, "test-qua.stream"), "Group ACL missing"); Assertions.assertFalse( - streamService.isNamespaceOwnerOfKafkaStream(ns, "test-nop.stream"),"No ACL"); + streamService.isNamespaceOwnerOfKafkaStream(ns, "test-nop.stream"), "No ACL"); } } diff --git a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java index 3c5e847b..a49a3d9e 100644 --- a/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java +++ b/src/test/java/com/michelin/ns4kafka/services/TopicServiceTest.java @@ -1,15 +1,29 @@ package com.michelin.ns4kafka.services; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertLinesMatch; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + import com.michelin.ns4kafka.models.AccessControlEntry; import com.michelin.ns4kafka.models.Namespace; import com.michelin.ns4kafka.models.Namespace.NamespaceSpec; import com.michelin.ns4kafka.models.ObjectMeta; import com.michelin.ns4kafka.models.Topic; +import com.michelin.ns4kafka.properties.ManagedClusterProperties; import com.michelin.ns4kafka.repositories.TopicRepository; -import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig; import com.michelin.ns4kafka.services.executors.TopicAsyncExecutor; import io.micronaut.context.ApplicationContext; import io.micronaut.inject.qualifiers.Qualifiers; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -18,14 +32,6 @@ import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; -import java.util.*; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; -import java.util.stream.Stream; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.when; - @ExtendWith(MockitoExtension.class) class TopicServiceTest { @InjectMocks @@ -41,63 +47,60 @@ class TopicServiceTest { 
ApplicationContext applicationContext;

    @Mock
-    List<KafkaAsyncExecutorConfig> kafkaAsyncExecutorConfigs;
+    List<ManagedClusterProperties> managedClusterProperties;

-    /**
-     * Validate find topic by name
-     */
    @Test
    void findByName() {
        Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+            .spec(NamespaceSpec.builder()
+                .connectClusters(List.of("local-name"))
+                .build())
+            .build();

        Topic t1 = Topic.builder()
-                .metadata(ObjectMeta.builder().name("ns-topic1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-topic1").build())
+            .build();
        Topic t2 = Topic.builder()
-                .metadata(ObjectMeta.builder().name("ns-topic2").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-topic2").build())
+            .build();
        Topic t3 = Topic.builder()
-                .metadata(ObjectMeta.builder().name("ns1-topic1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns1-topic1").build())
+            .build();
        Topic t4 = Topic.builder()
-                .metadata(ObjectMeta.builder().name("ns2-topic1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns2-topic1").build())
+            .build();

        when(topicRepository.findAllForCluster("local"))
-                .thenReturn(List.of(t1, t2, t3, t4));
+            .thenReturn(List.of(t1, t2, t3, t4));

        when(accessControlEntryService.findAllGrantedToNamespace(ns))
-                .thenReturn(List.of(
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
-                                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
-                                        .resource("ns-")
-                                        .build())
-                                .build(),
-                        AccessControlEntry.builder()
-                                .spec(AccessControlEntry.AccessControlEntrySpec.builder()
-                                        .permission(AccessControlEntry.Permission.OWNER)
-                                        .grantedTo("namespace")
-                                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
-                                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
-                                        .resource("ns1-topic1")
-                                        .build())
-                                .build()
-                ));
+            .thenReturn(List.of(
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED)
+                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
+                        .resource("ns-")
+                        .build())
+                    .build(),
+                AccessControlEntry.builder()
+                    .spec(AccessControlEntry.AccessControlEntrySpec.builder()
+                        .permission(AccessControlEntry.Permission.OWNER)
+                        .grantedTo("namespace")
+                        .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL)
+                        .resourceType(AccessControlEntry.ResourceType.TOPIC)
+                        .resource("ns1-topic1")
+                        .build())
+                    .build()
+            ));

        // search topic by name
        Optional<Topic> actualTopicPrefixed = topicService.findByName(ns, "ns-topic1");
@@ -110,28 +113,25 @@ void findByName() {
        assertThrows(NoSuchElementException.class, actualTopicNotFound::get, "No value present");
    }

-    /**
-     * Validate empty response when no topic in namespace
-     */
    @Test
    void findAllForNamespaceNoTopics() {
        Namespace ns = Namespace.builder()
-                .metadata(ObjectMeta.builder()
-                        .name("namespace")
-                        .cluster("local")
-                        .build())
-                .spec(NamespaceSpec.builder()
-                        .connectClusters(List.of("local-name"))
-                        .build())
-                .build();
+            .metadata(ObjectMeta.builder()
+                .name("namespace")
+                .cluster("local")
+                .build())
+
.spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); // no ns4kfk access control entries when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); // no ns4kfk topics when(topicRepository.findAllForCluster("local")) - .thenReturn(List.of()); + .thenReturn(List.of()); // get list of topics List list = topicService.findAllForNamespace(ns); @@ -140,120 +140,112 @@ void findAllForNamespaceNoTopics() { assertTrue(list.isEmpty()); } - /** - * Validate empty response when no topic ACLs - */ @Test void findAllForNamespaceNoAcls() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); // init ns4kfk topics Topic t1 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .build(); Topic t2 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic2").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic2").build()) + .build(); Topic t3 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns1-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns1-topic1").build()) + .build(); Topic t4 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns2-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns2-topic1").build()) + .build(); when(topicRepository.findAllForCluster("local")) - .thenReturn(List.of(t1, t2, t3, t4)); + .thenReturn(List.of(t1, t2, t3, t4)); // no ns4kfk access control entries when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of()); + .thenReturn(List.of()); // list of topics is empty List actual = topicService.findAllForNamespace(ns); assertTrue(actual.isEmpty()); } - /** - * Validate find all topics for namespace - */ @Test void findAllForNamespace() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); - // init ns4kfk topics Topic t0 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns0-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns0-topic1").build()) + .build(); Topic t1 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .build(); Topic t2 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic2").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic2").build()) + .build(); Topic t3 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns1-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns1-topic1").build()) + .build(); Topic t4 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns2-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns2-topic1").build()) + .build(); 
when(topicRepository.findAllForCluster("local")) - .thenReturn(List.of(t0,t1, t2, t3, t4)); + .thenReturn(List.of(t0, t1, t2, t3, t4)); - // ns4kfk access control entries when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns0-topic1") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns-") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.READ) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns1-topic1") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.WRITE) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns2-topic1") - .build()) - .build() - )); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns0-topic1") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns-") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.READ) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns1-topic1") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.WRITE) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns2-topic1") + .build()) + .build() + )); // search for topics into namespace @@ -269,68 +261,66 @@ void findAllForNamespace() { Assertions.assertFalse(actual.stream().anyMatch(topic -> topic.getMetadata().getName().equals("ns2-topic1"))); } - /** - * Validate unsynchronized topics listing - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void listUnsynchronizedNoExistingTopics() throws InterruptedException, ExecutionException, 
TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); // init topicAsyncExecutor TopicAsyncExecutor topicAsyncExecutor = Mockito.mock(TopicAsyncExecutor.class); when(applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(topicAsyncExecutor); + Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(topicAsyncExecutor); // list of existing broker topics when(topicAsyncExecutor.listBrokerTopicNames()).thenReturn(List.of("ns-topic1", "ns-topic2", - "ns1-topic1", "ns2-topic1")); + "ns1-topic1", "ns2-topic1")); // list of existing ns4kfk access control entries - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, "ns-topic1")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, "ns-topic2")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, "ns1-topic1")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, "ns2-topic1")) - .thenReturn(false); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + "ns-topic1")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + "ns-topic2")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + "ns1-topic1")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + "ns2-topic1")) + .thenReturn(false); when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns-") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns1-topic1") - .build()) - .build() - )); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns-") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + 
.resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns1-topic1") + .build()) + .build() + )); // no topic exists into ns4kfk when(topicRepository.findAllForCluster("local")) - .thenReturn(List.of()); + .thenReturn(List.of()); List actual = topicService.listUnsynchronizedTopicNames(ns); assertEquals(3, actual.size()); @@ -343,83 +333,81 @@ void listUnsynchronizedNoExistingTopics() throws InterruptedException, Execution } - /** - * Validate unsynchronized topics listing when all topics existing - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void listUnsynchronizedAllExistingTopics() throws InterruptedException, ExecutionException, TimeoutException { // init ns4kfk namespace Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); - // init ns4kfk topics Topic t1 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .build(); Topic t2 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic2").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic2").build()) + .build(); Topic t3 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns1-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns1-topic1").build()) + .build(); Topic t4 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns2-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns2-topic1").build()) + .build(); // init topicAsyncExecutor TopicAsyncExecutor topicAsyncExecutor = Mockito.mock(TopicAsyncExecutor.class); when(applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(topicAsyncExecutor); + Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(topicAsyncExecutor); // list of existing broker topics - when(topicAsyncExecutor.listBrokerTopicNames()).thenReturn(List.of(t1.getMetadata().getName(), t2.getMetadata().getName(), + when(topicAsyncExecutor.listBrokerTopicNames()).thenReturn( + List.of(t1.getMetadata().getName(), t2.getMetadata().getName(), t3.getMetadata().getName(), t4.getMetadata().getName())); // list of existing ns4kfk access control entries - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, t1.getMetadata().getName())) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, t2.getMetadata().getName())) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, t3.getMetadata().getName())) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, t4.getMetadata().getName())) - .thenReturn(false); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + t1.getMetadata().getName())) + .thenReturn(true); + 
when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + t2.getMetadata().getName())) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + t3.getMetadata().getName())) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + t4.getMetadata().getName())) + .thenReturn(false); when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns-") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns1-topic1") - .build()) - .build() - )); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns-") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns1-topic1") + .build()) + .build() + )); // all topic exists into ns4kfk when(topicRepository.findAllForCluster("local")) - .thenReturn(List.of(t1, t2, t3, t4)); + .thenReturn(List.of(t1, t2, t3, t4)); List actual = topicService.listUnsynchronizedTopicNames(ns); @@ -427,74 +415,71 @@ void listUnsynchronizedAllExistingTopics() throws InterruptedException, Executio } - /** - * Validate unsynchronized topics listing when some topics existing and some not - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void listUnsynchronizedPartialExistingTopics() throws InterruptedException, ExecutionException, TimeoutException { // init ns4kfk namespace Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .spec(NamespaceSpec.builder() - .connectClusters(List.of("local-name")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .spec(NamespaceSpec.builder() + .connectClusters(List.of("local-name")) + .build()) + .build(); - // init ns4kfk topics Topic t1 = Topic.builder() - .metadata(ObjectMeta.builder().name("ns-topic1").build()) - .build(); + .metadata(ObjectMeta.builder().name("ns-topic1").build()) + .build(); // init topicAsyncExecutor TopicAsyncExecutor topicAsyncExecutor = Mockito.mock(TopicAsyncExecutor.class); when(applicationContext.getBean(TopicAsyncExecutor.class, - Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(topicAsyncExecutor); + 
Qualifiers.byName(ns.getMetadata().getCluster()))).thenReturn(topicAsyncExecutor); // list of existing broker topics when(topicAsyncExecutor.listBrokerTopicNames()).thenReturn(List.of("ns-topic1", "ns-topic2", - "ns1-topic1", "ns2-topic1")); + "ns1-topic1", "ns2-topic1")); // list of existing ns4kfk access control entries - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, "ns-topic1")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, "ns-topic2")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, "ns1-topic1")) - .thenReturn(true); - when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, "ns2-topic1")) - .thenReturn(false); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + "ns-topic1")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + "ns-topic2")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + "ns1-topic1")) + .thenReturn(true); + when(accessControlEntryService.isNamespaceOwnerOfResource("namespace", AccessControlEntry.ResourceType.TOPIC, + "ns2-topic1")) + .thenReturn(false); when(accessControlEntryService.findAllGrantedToNamespace(ns)) - .thenReturn(List.of( - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns-") - .build()) - .build(), - AccessControlEntry.builder() - .spec(AccessControlEntry.AccessControlEntrySpec.builder() - .permission(AccessControlEntry.Permission.OWNER) - .grantedTo("namespace") - .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) - .resourceType(AccessControlEntry.ResourceType.TOPIC) - .resource("ns1-topic1") - .build()) - .build() - )); + .thenReturn(List.of( + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.PREFIXED) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns-") + .build()) + .build(), + AccessControlEntry.builder() + .spec(AccessControlEntry.AccessControlEntrySpec.builder() + .permission(AccessControlEntry.Permission.OWNER) + .grantedTo("namespace") + .resourcePatternType(AccessControlEntry.ResourcePatternType.LITERAL) + .resourceType(AccessControlEntry.ResourceType.TOPIC) + .resource("ns1-topic1") + .build()) + .build() + )); // partial number of topics exists into ns4kfk when(topicRepository.findAllForCluster("local")) - .thenReturn(List.of(t1)); + .thenReturn(List.of(t1)); List actual = topicService.listUnsynchronizedTopicNames(ns); @@ -508,87 +493,69 @@ void listUnsynchronizedPartialExistingTopics() throws InterruptedException, Exec } - /** - * Validate colliding topics when there is no collision - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test 
void findCollidingTopicsNoCollision() throws ExecutionException, InterruptedException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("project1.topic").build()) - .build(); + .metadata(ObjectMeta.builder().name("project1.topic").build()) + .build(); TopicAsyncExecutor topicAsyncExecutor = Mockito.mock(TopicAsyncExecutor.class); when(applicationContext.getBean(TopicAsyncExecutor.class, Qualifiers.byName("local"))) - .thenReturn(topicAsyncExecutor); + .thenReturn(topicAsyncExecutor); when(topicAsyncExecutor.listBrokerTopicNames()) - .thenReturn(List.of("project2.topic", "project1.other")); + .thenReturn(List.of("project2.topic", "project1.other")); List actual = topicService.findCollidingTopics(ns, topic); assertTrue(actual.isEmpty()); } - /** - * Validate colliding topics when names collide - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void findCollidingTopicsIdenticalName() throws ExecutionException, InterruptedException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("project1.topic").build()) - .build(); + .metadata(ObjectMeta.builder().name("project1.topic").build()) + .build(); TopicAsyncExecutor topicAsyncExecutor = Mockito.mock(TopicAsyncExecutor.class); when(applicationContext.getBean(TopicAsyncExecutor.class, Qualifiers.byName("local"))) - .thenReturn(topicAsyncExecutor); + .thenReturn(topicAsyncExecutor); when(topicAsyncExecutor.listBrokerTopicNames()) - .thenReturn(List.of("project1.topic", "project2.topic", "project1.other")); + .thenReturn(List.of("project1.topic", "project2.topic", "project1.other")); List actual = topicService.findCollidingTopics(ns, topic); assertTrue(actual.isEmpty(), "Topic with exactly the same name should not interfere with collision check"); } - /** - * Validate colliding topics when names collide - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void findCollidingTopicsCollidingName() throws ExecutionException, InterruptedException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("project1.topic").build()) - .build(); + .metadata(ObjectMeta.builder().name("project1.topic").build()) + .build(); TopicAsyncExecutor topicAsyncExecutor = Mockito.mock(TopicAsyncExecutor.class); when(applicationContext.getBean(TopicAsyncExecutor.class, Qualifiers.byName("local"))) - .thenReturn(topicAsyncExecutor); + .thenReturn(topicAsyncExecutor); when(topicAsyncExecutor.listBrokerTopicNames()) - .thenReturn(List.of("project1_topic")); + .thenReturn(List.of("project1_topic")); List actual = topicService.findCollidingTopics(ns, 
topic); @@ -596,126 +563,109 @@ void findCollidingTopicsCollidingName() throws ExecutionException, InterruptedEx assertLinesMatch(List.of("project1_topic"), actual); } - /** - * Validate colliding topics when there is an interrupted exception - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void findCollidingTopicsInterruptedException() throws ExecutionException, InterruptedException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("project1.topic").build()) - .build(); + .metadata(ObjectMeta.builder().name("project1.topic").build()) + .build(); TopicAsyncExecutor topicAsyncExecutor = Mockito.mock(TopicAsyncExecutor.class); when(applicationContext.getBean(TopicAsyncExecutor.class, Qualifiers.byName("local"))) - .thenReturn(topicAsyncExecutor); + .thenReturn(topicAsyncExecutor); when(topicAsyncExecutor.listBrokerTopicNames()) - .thenThrow(new InterruptedException()); + .thenThrow(new InterruptedException()); - assertThrows(InterruptedException.class, - () -> topicService.findCollidingTopics(ns, topic)); + assertThrows(InterruptedException.class, + () -> topicService.findCollidingTopics(ns, topic)); assertTrue(Thread.interrupted()); } - /** - * Validate colliding topics when there is a runtime exception - * @throws InterruptedException Any interrupted exception - * @throws ExecutionException Any execution exception - * @throws TimeoutException Any timeout exception - */ @Test void findCollidingTopicsOtherException() throws ExecutionException, InterruptedException, TimeoutException { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder().name("project1.topic").build()) - .build(); + .metadata(ObjectMeta.builder().name("project1.topic").build()) + .build(); TopicAsyncExecutor topicAsyncExecutor = Mockito.mock(TopicAsyncExecutor.class); when(applicationContext.getBean(TopicAsyncExecutor.class, Qualifiers.byName("local"))) - .thenReturn(topicAsyncExecutor); + .thenReturn(topicAsyncExecutor); when(topicAsyncExecutor.listBrokerTopicNames()) - .thenThrow(new RuntimeException("Unknown Error")); + .thenThrow(new RuntimeException("Unknown Error")); assertThrows(RuntimeException.class, - () -> topicService.findCollidingTopics(ns, topic)); + () -> topicService.findCollidingTopics(ns, topic)); } - /** - * Validate a topic is eligible for record deletion - */ @Test void validateDeleteRecordsTopic() { Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("project1.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .configs(Collections.singletonMap("cleanup.policy", "compact")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("project1.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .configs(Collections.singletonMap("cleanup.policy", "compact")) + .build()) + .build(); List actual = topicService.validateDeleteRecordsTopic(topic); assertEquals(1, actual.size()); - assertLinesMatch(List.of("Cannot delete records on a compacted topic. 
Please delete and recreate the topic."), actual); + assertLinesMatch(List.of("Cannot delete records on a compacted topic. Please delete and recreate the topic."), + actual); } - /** - * Validate topic update when partition number change - */ @Test void validateTopicUpdatePartitions() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .namespace("test") - .cluster("local") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .namespace("test") + .cluster("local") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(6) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); - - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of()); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(6) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); + + when(managedClusterProperties.stream()).thenReturn(Stream.of()); List actual = topicService.validateTopicUpdate(ns, existing, topic); @@ -723,208 +673,201 @@ void validateTopicUpdatePartitions() { assertLinesMatch(List.of("Invalid value 6 for configuration partitions: Value is immutable (3)."), actual); } - /** - * Validate topic update when replication factor change - */ @Test void validateTopicUpdateReplicationFactor() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(6) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); - - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of()); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(6) + 
.partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); + + when(managedClusterProperties.stream()).thenReturn(Stream.of()); List actual = topicService.validateTopicUpdate(ns, existing, topic); assertEquals(1, actual.size()); - assertLinesMatch(List.of("Invalid value 6 for configuration replication.factor: Value is immutable (3)."), actual); + assertLinesMatch(List.of("Invalid value 6 for configuration replication.factor: Value is immutable (3)."), + actual); } - /** - * Validate topic update when cleanup policy change from delete to compact on Confluent Cloud - */ @Test - void validateTopicUpdateCleanupPolicyDeleteToCompactOnCCloud() { + void validateTopicUpdateCleanupPolicyDeleteToCompactOnCloud() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); - - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); + + when(managedClusterProperties.stream()).thenReturn( + Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); List actual = topicService.validateTopicUpdate(ns, existing, topic); assertEquals(1, actual.size()); - assertLinesMatch(List.of("Invalid value compact for configuration cleanup.policy: Altering topic configuration from `delete` to `compact` is not currently supported. Please create a new topic with `compact` policy specified instead."), actual); + assertLinesMatch(List.of( + "Invalid value compact for configuration cleanup.policy: Altering topic configuration " + + "from `delete` to `compact` is not currently supported. 
Please create a new topic with " + + "`compact` policy specified instead."), + actual); } - /** - * Validate topic update when cleanup policy change from compact to delete on Confluent Cloud - */ @Test - void validateTopicUpdateCleanupPolicyCompactToDeleteOnCCloud() { + void validateTopicUpdateCleanupPolicyCompactToDeleteOnCloud() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "compact", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); - - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", KafkaAsyncExecutorConfig.KafkaProvider.CONFLUENT_CLOUD))); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); + + when(managedClusterProperties.stream()).thenReturn( + Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.CONFLUENT_CLOUD))); List actual = topicService.validateTopicUpdate(ns, existing, topic); assertEquals(0, actual.size()); } - /** - * Validate topic update when cleanup policy change from delete to compact on Confluent Cloud - */ @Test void validateTopicUpdateCleanupPolicyDeleteToCompactOnSelfManaged() { Namespace ns = Namespace.builder() - .metadata(ObjectMeta.builder() - .name("namespace") - .cluster("local") - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("namespace") + .cluster("local") + .build()) + .build(); Topic existing = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","delete", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); + .metadata(ObjectMeta.builder() + .name("test.topic") + .build()) + .spec(Topic.TopicSpec.builder() + .replicationFactor(3) + .partitions(3) + .configs(Map.of("cleanup.policy", "delete", + "min.insync.replicas", "2", + "retention.ms", "60000")) + .build()) + .build(); Topic topic = Topic.builder() - .metadata(ObjectMeta.builder() - .name("test.topic") - .build()) - .spec(Topic.TopicSpec.builder() - .replicationFactor(3) - .partitions(3) - .configs(Map.of("cleanup.policy","compact", - "min.insync.replicas", "2", - "retention.ms", "60000")) - .build()) - .build(); - - when(kafkaAsyncExecutorConfigs.stream()).thenReturn(Stream.of(new KafkaAsyncExecutorConfig("local", 
KafkaAsyncExecutorConfig.KafkaProvider.SELF_MANAGED)));
+            .metadata(ObjectMeta.builder()
+                .name("test.topic")
+                .build())
+            .spec(Topic.TopicSpec.builder()
+                .replicationFactor(3)
+                .partitions(3)
+                .configs(Map.of("cleanup.policy", "compact",
+                    "min.insync.replicas", "2",
+                    "retention.ms", "60000"))
+                .build())
+            .build();
+
+        when(managedClusterProperties.stream()).thenReturn(
+            Stream.of(new ManagedClusterProperties("local", ManagedClusterProperties.KafkaProvider.SELF_MANAGED)));

        List<String> actual = topicService.validateTopicUpdate(ns, existing, topic);

        assertEquals(0, actual.size());
    }

-    /**
-     * Validate find all for all namespaces
-     */
    @Test
    void findAll() {
        Topic t1 = Topic.builder()
-                .metadata(ObjectMeta.builder().name("ns-topic1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-topic1").build())
+            .build();
        Topic t2 = Topic.builder()
-                .metadata(ObjectMeta.builder().name("ns-topic2").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns-topic2").build())
+            .build();
        Topic t3 = Topic.builder()
-                .metadata(ObjectMeta.builder().name("ns1-topic1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns1-topic1").build())
+            .build();
        Topic t4 = Topic.builder()
-                .metadata(ObjectMeta.builder().name("ns2-topic1").build())
-                .build();
+            .metadata(ObjectMeta.builder().name("ns2-topic1").build())
+            .build();

        when(topicRepository.findAll()).thenReturn(List.of(t1, t2, t3, t4));
diff --git a/src/test/java/com/michelin/ns4kafka/testcontainers/KafkaConnectContainer.java b/src/test/java/com/michelin/ns4kafka/testcontainers/KafkaConnectContainer.java
index d658af6f..0822022a 100644
--- a/src/test/java/com/michelin/ns4kafka/testcontainers/KafkaConnectContainer.java
+++ b/src/test/java/com/michelin/ns4kafka/testcontainers/KafkaConnectContainer.java
@@ -1,16 +1,17 @@
package com.michelin.ns4kafka.testcontainers;

-import org.testcontainers.containers.GenericContainer;
-import org.testcontainers.containers.wait.strategy.Wait;
-import org.testcontainers.utility.DockerImageName;
+import static java.lang.String.format;

import java.time.Duration;
import java.util.UUID;
-
-import static java.lang.String.format;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.utility.DockerImageName;

/**
- * This file is a slight adaptation of the KafkaConnectContainer code available on ydespreaux's Github account:
+ * This file is a slight adaptation of the KafkaConnectContainer code
+ * available on ydespreaux's GitHub account.
+ *
 * @see KafkaConnectContainer.java
 */
public class KafkaConnectContainer extends GenericContainer<KafkaConnectContainer> {
@@ -44,22 +45,31 @@ public class KafkaConnectContainer extends GenericContainer
createContainerCmd.withName("testcontainsers-kafka-connect-" + UUID.randomUUID()));
+            .withEnv("CONNECT_REST_ADVERTISED_HOST_NAME", "kafka-connect")
+            .withEnv("CONNECT_PLUGIN_PATH", PLUGIN_PATH_CONTAINER)
+            .withEnv("CONNECT_LOG4J_LOGGERS", "org.reflections=ERROR")
+            .withEnv("CONNECT_REST_PORT", String.valueOf(CONNECT_REST_PORT_INTERNAL))
+            .withCreateContainerCmdModifier(createContainerCmd -> createContainerCmd.withName(
+                "testcontainers-kafka-connect-" + UUID.randomUUID()));
    }

    /**
-     * Get the url of Kafka Connect
+     * Get the URL of Kafka Connect.
+ * * @return The URL */ public String getUrl() { diff --git a/src/test/java/com/michelin/ns4kafka/testcontainers/SchemaRegistryContainer.java b/src/test/java/com/michelin/ns4kafka/testcontainers/SchemaRegistryContainer.java index 8429e259..488f859a 100644 --- a/src/test/java/com/michelin/ns4kafka/testcontainers/SchemaRegistryContainer.java +++ b/src/test/java/com/michelin/ns4kafka/testcontainers/SchemaRegistryContainer.java @@ -1,12 +1,12 @@ package com.michelin.ns4kafka.testcontainers; +import static java.lang.String.format; + import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.utility.DockerImageName; -import static java.lang.String.format; - /** * This class is a testcontainers implementation for the * https://docs.confluent.io/current/schema-registry/index.html @@ -16,17 +16,17 @@ public class SchemaRegistryContainer extends GenericContainer