diff --git a/.checkstyle/checkstyle.xml b/.checkstyle/checkstyle.xml
new file mode 100644
index 00000000..32c0f31e
--- /dev/null
+++ b/.checkstyle/checkstyle.xml
@@ -0,0 +1,382 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.github/workflows/on_pull_request.yml b/.github/workflows/on_pull_request.yml
index 95d2c00c..3a9f0927 100644
--- a/.github/workflows/on_pull_request.yml
+++ b/.github/workflows/on_pull_request.yml
@@ -8,39 +8,42 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- - name: Checkout project
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Set up JDK 17
- uses: actions/setup-java@v3
- with:
- java-version: '17'
- distribution: 'temurin'
-
- - name: Cache SonarCloud packages
- uses: actions/cache@v3
- with:
- path: ~/.sonar/cache
- key: ${{ runner.os }}-sonar
- restore-keys: ${{ runner.os }}-sonar
-
- - name: Cache Gradle packages
- uses: actions/cache@v3
- with:
- path: ~/.gradle/caches
- key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
- restore-keys: ${{ runner.os }}-gradle
-
- - name: Build and analyze
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
- run: ./gradlew build jacocoTestReport sonar --info
-
- - name: Publish test report
- if: always()
- uses: mikepenz/action-junit-report@v4
- with:
- report_paths: '**/build/test-results/test/TEST-*.xml'
+ - name: Checkout project
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up JDK 17
+ uses: actions/setup-java@v3
+ with:
+ java-version: '17'
+ distribution: 'temurin'
+
+ - name: Cache SonarCloud packages
+ uses: actions/cache@v3
+ with:
+ path: ~/.sonar/cache
+ key: ${{ runner.os }}-sonar
+ restore-keys: ${{ runner.os }}-sonar
+
+ - name: Cache Gradle packages
+ uses: actions/cache@v3
+ with:
+ path: ~/.gradle/caches
+ key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
+ restore-keys: ${{ runner.os }}-gradle
+
+ - name: Check Style
+ run: ./gradlew checkstyleMain checkstyleTest
+
+ - name: Build and analyze
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+ run: ./gradlew build jacocoTestReport sonar --info
+
+ - name: Publish test report
+ if: always()
+ uses: mikepenz/action-junit-report@v4
+ with:
+ report_paths: '**/build/test-results/test/TEST-*.xml'
\ No newline at end of file
diff --git a/.github/workflows/on_push_master.yml b/.github/workflows/on_push_master.yml
index c77415ce..f30d2152 100644
--- a/.github/workflows/on_push_master.yml
+++ b/.github/workflows/on_push_master.yml
@@ -39,6 +39,9 @@ jobs:
key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
restore-keys: ${{ runner.os }}-gradle
+ - name: Check Style
+ run: ./gradlew checkstyleMain checkstyleTest
+
- name: Build and analyze
id: build_jar
env:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6ba2b42d..e83638b9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,30 +2,43 @@
Welcome and thank you for considering contributing to Ns4kafka!
-By following these guidelines, you can help make the contribution process easy and effective for everyone involved. It also shows that you agree to respect the time of the developers managing and developing these open source projects. In return, we will reciprocate that respect by addressing your issue, assessing changes, and helping you finalize your pull requests.
+By following these guidelines, you can help make the contribution process easy and effective for everyone involved. It
+also shows that you agree to respect the time of the developers managing and developing these open source projects. In
+return, we will reciprocate that respect by addressing your issue, assessing changes, and helping you finalize your pull
+requests.
## Getting Started
### Issues
-Issues should be used to report problems, request a new feature, or to discuss potential changes before a PR is created. When you create a new Issue, a template will be loaded that will guide you through collecting and providing the information we need to investigate.
+Issues should be used to report problems, request a new feature, or to discuss potential changes before a PR is created.
+When you create a new Issue, a template will be loaded that will guide you through collecting and providing the
+information we need to investigate.
-If you find an existing issue that addresses the problem you're having, please add your own reproduction information to the existing issue instead of creating a new one. Adding a [reaction](https://github.blog/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) can also indicate to our maintainers that a particular problem is affecting more than just the reporter.
+If you find an existing issue that addresses the problem you're having, please add your own reproduction information to
+the existing issue instead of creating a new one. Adding
+a [reaction](https://github.blog/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) can also indicate to
+our maintainers that a particular problem is affecting more than just the reporter.
-If you're unable to find an open issue addressing the problem, open a new one. Be sure to include a title and a clear description, relevant information, and a code sample or executable test case demonstrating the expected behavior that is not occurring.
+If you're unable to find an open issue addressing the problem, open a new one. Be sure to include a title and a clear
+description, relevant information, and a code sample or executable test case demonstrating the expected behavior that is
+not occurring.
### Pull Requests
-PRs are always welcome and can be a quick way to get your fix or improvement slated for the next release. In general, PRs should:
+PRs are always welcome and can be a quick way to get your fix or improvement slated for the next release. In general,
+PRs should:
- Only fix/add the functionality in question OR address wide-spread style issues, not both.
- Add unit or integration tests for fixed or changed functionality (if a test suite already exists).
- Address a single concern in the least number of changed lines as possible.
- Be accompanied by a complete Pull Request template (loaded automatically when a PR is created).
-Be sure to use the past tense ("Added new feature...", "Fixed bug on...") and add tags to the PR ("documentation" for documentation updates, "bug" for bug fixing, etc.).
+Be sure to use the past tense ("Added new feature...", "Fixed bug on...") and add tags to the PR ("documentation" for
+documentation updates, "bug" for bug fixing, etc.).
-For changes that address core functionality or would require breaking changes (e.g. a major release), it's best to open an Issue to discuss your proposal first. This is not required but can save time creating and reviewing changes.
+For changes that address core functionality or would require breaking changes (e.g. a major release), it's best to open
+an Issue to discuss your proposal first. This is not required but can save time creating and reviewing changes.
In general, we follow the ["fork-and-pull" Git workflow](https://github.com/susam/gitpr)
@@ -37,14 +50,22 @@ In general, we follow the ["fork-and-pull" Git workflow](https://github.com/susa
- Push changes to your fork
- Open a PR in our repository targeting master and follow the PR template so that we can efficiently review the changes.
-## Styleguides
+## Style Guide
-### Git Commit Messages
+### Code Style
-When contributing to the project, it's important to follow a consistent style for Git commit messages. Here are some guidelines to keep in mind:
+We maintain a consistent code style using [Checkstyle](https://checkstyle.sourceforge.io/).
-- Use the present tense, such as "Add feature," rather than the past tense, such as "Added feature."
-- Use the imperative mood, such as "Move cursor to..." rather than "Moves cursor to..."
-- Limit the first line of the commit message to 72 characters or less.
-- Use references to issues and pull requests after the first line as needed.
-- If your commit only changes documentation, include `[ci skip]` in the commit title.
+The configuration file is defined in the `.checkstyle` folder.
+To perform Checkstyle validation, run the following:
+
+```bash
+./gradlew checkstyleMain checkstyleTest
+```
+
+Before you start contributing new code, it is recommended to:
+
+- Install the IntelliJ [CheckStyle-IDEA](https://plugins.jetbrains.com/plugin/1065-checkstyle-idea) plugin.
+- Configure the plugin to use Ns4Kafka's Checkstyle configuration file.
+
+Adhering to this code style ensures consistency and helps maintain code quality throughout the project.
\ No newline at end of file
diff --git a/README.md b/README.md
index 6b1e2a8c..4cdb9214 100644
--- a/README.md
+++ b/README.md
@@ -8,52 +8,62 @@
[![Docker Pulls](https://img.shields.io/docker/pulls/michelin/ns4kafka?label=Pulls&logo=docker&style=for-the-badge)](https://hub.docker.com/r/michelin/ns4kafka/tags)
[![Docker Stars](https://img.shields.io/docker/stars/michelin/ns4kafka?label=Stars&logo=docker&style=for-the-badge)](https://hub.docker.com/r/michelin/ns4kafka)
[![SonarCloud Coverage](https://img.shields.io/sonar/coverage/michelin_ns4kafka?logo=sonarcloud&server=https%3A%2F%2Fsonarcloud.io&style=for-the-badge)](https://sonarcloud.io/component_measures?id=michelin_ns4kafka&metric=coverage&view=list)
+[![SonarCloud Tests](https://img.shields.io/sonar/tests/michelin_ns4kafka/master?server=https%3A%2F%2Fsonarcloud.io&style=for-the-badge&logo=sonarcloud)](https://sonarcloud.io/component_measures?metric=tests&view=list&id=michelin_ns4kafka)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg?logo=apache&style=for-the-badge)](https://opensource.org/licenses/Apache-2.0)
-Ns4Kafka introduces namespace functionality to Apache Kafka, as well as a new deployment model for Kafka resources using [Kafkactl](https://github.com/michelin/kafkactl), which follows best practices from Kubernetes.
+Ns4Kafka introduces namespace functionality to Apache Kafka, as well as a new deployment model for Kafka resources
+using [Kafkactl](https://github.com/michelin/kafkactl), which follows best practices from Kubernetes.
## Table of Contents
* [Principles](#principles)
- * [Namespace Isolation](#namespace-isolation)
- * [Desired State](#desired-state)
- * [Server Side Validation](#server-side-validation)
- * [CLI](#cli)
+ * [Namespace Isolation](#namespace-isolation)
+ * [Desired State](#desired-state)
+ * [Server Side Validation](#server-side-validation)
+ * [CLI](#cli)
* [Download](#download)
* [Install](#install)
* [Demo Environment](#demo-environment)
* [Configuration](#configuration)
- * [GitLab Authentication](#gitlab-authentication)
- * [Admin Account](#admin-account)
- * [Kafka Broker Authentication](#kafka-broker-authentication)
- * [Managed clusters](#managed-clusters)
- * [AKHQ](#akhq)
+ * [GitLab Authentication](#gitlab-authentication)
+ * [Admin Account](#admin-account)
+ * [Kafka Broker Authentication](#kafka-broker-authentication)
+ * [Managed clusters](#managed-clusters)
+ * [AKHQ](#akhq)
* [Administration](#administration)
* [Contribution](#contribution)
## Principles
-Ns4Kafka is an API that provides controllers for listing, creating, and deleting various Kafka resources, including topics, connectors, schemas, and Kafka Connect clusters. The solution is built on several principles.
+Ns4Kafka is an API that provides controllers for listing, creating, and deleting various Kafka resources, including
+topics, connectors, schemas, and Kafka Connect clusters. The solution is built on several principles.
### Namespace Isolation
-Ns4Kafka implements the concept of namespaces, which enable encapsulation of Kafka resources within specific namespaces. Each namespace can only view and manage the resources that belong to it, with other namespaces being isolated from each other. This isolation is achieved by assigning ownership of names and prefixes to specific namespaces.
+Ns4Kafka implements the concept of namespaces, which enable encapsulation of Kafka resources within specific namespaces.
+Each namespace can only view and manage the resources that belong to it, with other namespaces being isolated from each
+other. This isolation is achieved by assigning ownership of names and prefixes to specific namespaces.
### Desired State
-Whenever you deploy a Kafka resource using Ns4Kafka, the solution saves it to a dedicated topic and synchronizes the Kafka cluster to ensure that the resource's desired state is achieved.
+Whenever you deploy a Kafka resource using Ns4Kafka, the solution saves it to a dedicated topic and synchronizes the
+Kafka cluster to ensure that the resource's desired state is achieved.
### Server Side Validation
-Ns4Kafka allows you to apply customizable validation rules to ensure that your resources are configured with the appropriate values.
+Ns4Kafka allows you to apply customizable validation rules to ensure that your resources are configured with the
+appropriate values.
### CLI
-Ns4Kafka includes [Kafkactl](https://github.com/michelin/kafkactl), a command-line interface (CLI) that enables you to deploy your Kafka resources 'as code' within your namespace using YAML descriptors. This tool can also be used in continuous integration/continuous delivery (CI/CD) pipelines.
+Ns4Kafka includes [Kafkactl](https://github.com/michelin/kafkactl), a command-line interface (CLI) that enables you to
+deploy your Kafka resources 'as code' within your namespace using YAML descriptors. This tool can also be used in
+continuous integration/continuous delivery (CI/CD) pipelines.
## Download
-You can download Ns4Kafka as a fat jar from the project's releases page on GitHub at https://github.com/michelin/ns4kafka/releases.
+You can download Ns4Kafka as a fat jar from the project's releases page on GitHub
+at https://github.com/michelin/ns4kafka/releases.
Additionally, a Docker image of the solution is available at https://hub.docker.com/repository/docker/michelin/ns4kafka.
@@ -61,15 +71,18 @@ Additionally, a Docker image of the solution is available at https://hub.docker.
To operate, Ns4Kafka requires a Kafka broker for data storage and GitLab for user authentication.
-The solution is built on the [Micronaut framework](https://micronaut.io/) and can be configured with any [Micronaut property source loader](https://docs.micronaut.io/1.3.0.M1/guide/index.html#_included_propertysource_loaders).
+The solution is built on the [Micronaut framework](https://micronaut.io/) and can be configured with
+any [Micronaut property source loader](https://docs.micronaut.io/1.3.0.M1/guide/index.html#_included_propertysource_loaders).
-To override the default properties from the `application.yml` file, you can set the `micronaut.config.file` system property when running the fat jar file, like so:
+To override the default properties from the `application.yml` file, you can set the `micronaut.config.file` system
+property when running the fat jar file, like so:
```console
java -Dmicronaut.config.file=application.yml -jar ns4kafka.jar
```
-Alternatively, you can set the `MICRONAUT_CONFIG_FILE` environment variable and then run the jar file without additional parameters, as shown below:
+Alternatively, you can set the `MICRONAUT_CONFIG_FILE` environment variable and then run the jar file without additional
+parameters, as shown below:
```console
MICRONAUT_CONFIG_FILE=application.yml
@@ -85,6 +98,7 @@ docker-compose up -d
```
This command will start multiple containers, including:
+
- 1 Zookeeper
- 1 Kafka broker
- 1 Schema registry
@@ -96,11 +110,16 @@ This command will start multiple containers, including:
Please note that SASL/SCRAM authentication and authorization using ACLs are enabled on the broker.
To get started, you'll need to perform the following steps:
-1. Define a GitLab admin group for Ns4Kafka in the `application.yml` file. You can find an example [here](#admin-account). It is recommended to choose a GitLab group you belong to in order to have admin rights.
-2. Define a GitLab token for Kafkactl in the `config.yml` file. You can refer to the installation instructions [here](https://github.com/michelin/kafkactl#install).
-3. Define a GitLab group you belong to in the role bindings of the `resources/admin/namespace.yml` file. This is demonstrated in the example [here](https://github.com/michelin/kafkactl#role-binding).
-## Configuration
+1. Define a GitLab admin group for Ns4Kafka in the `application.yml` file. You can find an
+ example [here](#admin-account). It is recommended to choose a GitLab group you belong to in order to have admin
+ rights.
+2. Define a GitLab token for Kafkactl in the `config.yml` file. You can refer to the installation
+ instructions [here](https://github.com/michelin/kafkactl#install).
+3. Define a GitLab group you belong to in the role bindings of the `resources/admin/namespace.yml` file. This is
+ demonstrated in the example [here](https://github.com/michelin/kafkactl#role-binding).
+
+## Configuration
### GitLab Authentication
@@ -131,7 +150,8 @@ ns4kafka:
admin-group: "MY_ADMIN_GROUP"
```
-If the admin group is set to "MY_ADMIN_GROUP", users will be granted admin privileges if they belong to the GitLab group "MY_ADMIN_GROUP".
+If the admin group is set to "MY_ADMIN_GROUP", users will be granted admin privileges if they belong to the GitLab
+group "MY_ADMIN_GROUP".
### Kafka Broker Authentication
@@ -179,7 +199,8 @@ ns4kafka:
basicAuthPassword: "password"
```
-The name for each managed cluster has to be unique. This is this name you have to set in the field **metadata.cluster** of your namespace descriptors.
+The name for each managed cluster has to be unique. This is the name you have to set in the field **metadata.cluster**
+of your namespace descriptors.
| Property | type | description |
|-----------------------------------------|---------|-------------------------------------------------------------|
@@ -201,9 +222,11 @@ The configuration will depend on the authentication method selected for your bro
### AKHQ
-[AKHQ](https://github.com/tchiotludo/akhq) can be integrated with Ns4Kafka to provide access to resources within your namespace during the authentication process.
+[AKHQ](https://github.com/tchiotludo/akhq) can be integrated with Ns4Kafka to provide access to resources within your
+namespace during the authentication process.
To enable this integration, follow these steps:
+
1. Configure LDAP authentication in AKHQ.
2. Add the Ns4Kafka claim endpoint to AKHQ's configuration:
@@ -219,7 +242,9 @@ For AKHQ versions from v0.20 to v0.24, use the `/akhq-claim/v2` endpoint.
For AKHQ versions prior to v0.20, use the `/akhq-claim/v1` endpoint.
3. In your Ns4Kafka configuration, specify the following settings for AKHQ:
+
* For AKHQ versions v0.25 and later
+
```yaml
ns4kafka:
akhq:
@@ -239,6 +264,7 @@ ns4kafka:
```
* For AKHQ versions prior to v0.25
+
```yaml
ns4kafka:
akhq:
@@ -276,12 +302,17 @@ metadata:
support-group: NAMESPACE-LDAP-GROUP
```
-Once the configuration is in place, after successful authentication in AKHQ, users belonging to the `NAMESPACE-LDAP-GROUP` will be able to access the resources within the `myNamespace` namespace.
+Once the configuration is in place, after successful authentication in AKHQ, users belonging to
+the `NAMESPACE-LDAP-GROUP` will be able to access the resources within the `myNamespace` namespace.
## Administration
-The setup of namespaces, owner ACLs, role bindings, and quotas is the responsibility of Ns4Kafka administrators, as these resources define the context in which project teams will work. To create your first namespace, please refer to the [Kafkactl documentation](https://github.com/michelin/kafkactl/blob/main/README.md#administrator).
+The setup of namespaces, owner ACLs, role bindings, and quotas is the responsibility of Ns4Kafka administrators, as
+these resources define the context in which project teams will work. To create your first namespace, please refer to
+the [Kafkactl documentation](https://github.com/michelin/kafkactl/blob/main/README.md#administrator).
## Contribution
-
-We welcome contributions from the community! Before you get started, please take a look at our [contribution guide](https://github.com/michelin/ns4kafka/blob/master/CONTRIBUTING.md) to learn about our guidelines and best practices. We appreciate your help in making Ns4Kafka a better tool for everyone.
+
+We welcome contributions from the community! Before you get started, please take a look at
+our [contribution guide](https://github.com/michelin/ns4kafka/blob/master/CONTRIBUTING.md) to learn about our guidelines
+and best practices. We appreciate your help in making Ns4Kafka a better tool for everyone.
diff --git a/build.gradle b/build.gradle
index 1b30300f..6716ebcb 100644
--- a/build.gradle
+++ b/build.gradle
@@ -4,6 +4,7 @@ plugins {
id("jacoco")
id("org.sonarqube") version "4.3.1.3277"
id("pl.allegro.tech.build.axion-release") version "1.15.4"
+ id("checkstyle")
}
version = scmVersion.version
@@ -87,25 +88,25 @@ dockerfile {
if (project.hasProperty("releaseLatest")) {
dockerBuild {
- images = ["michelin/ns4kafka:" + version, "michelin/ns4kafka:latest"]
+ images.set(["michelin/ns4kafka:" + version, "michelin/ns4kafka:latest"])
}
} else {
dockerBuild {
- images = ["michelin/ns4kafka:" + version]
+ images.set(["michelin/ns4kafka:" + version])
}
}
-tasks.withType(JavaCompile) {
+tasks.withType(JavaCompile).configureEach {
options.fork = true
options.forkOptions.jvmArgs << '-Dmicronaut.openapi.views.spec=rapidoc.enabled=true'
}
sonarqube {
- properties {
- property "sonar.projectKey", "michelin_ns4kafka"
- property "sonar.organization", "michelin"
- property "sonar.host.url", "https://sonarcloud.io"
- }
+ properties {
+ property "sonar.projectKey", "michelin_ns4kafka"
+ property "sonar.organization", "michelin"
+ property "sonar.host.url", "https://sonarcloud.io"
+ }
}
jacocoTestReport {
@@ -122,3 +123,11 @@ test {
}
}
+checkstyle {
+ toolVersion = '10.12.3'
+ configFile = file(".checkstyle/checkstyle.xml")
+ ignoreFailures = false
+ maxErrors = 0
+ maxWarnings = 0
+}
+
diff --git a/micronaut-cli.yml b/micronaut-cli.yml
index 410d6888..f91ac003 100644
--- a/micronaut-cli.yml
+++ b/micronaut-cli.yml
@@ -3,4 +3,4 @@ defaultPackage: com.michelin.ns4kafka
testFramework: junit
sourceLanguage: java
buildTool: gradle
-features: [annotation-api, app-name, gradle, http-client, jackson-databind, java, java-application, junit, kafka, logback, lombok, micronaut-aot, micronaut-build, micronaut-http-validation, mockito, netty-server, openapi, reactor, reactor-http-client, readme, security, security-annotations, security-jwt, security-ldap, shade, testcontainers, validation]
+features: [ annotation-api, app-name, gradle, http-client, jackson-databind, java, java-application, junit, kafka, logback, lombok, management, micronaut-aot, micronaut-build, micronaut-http-validation, mockito, netty-server, openapi, reactor, reactor-http-client, readme, security, security-annotations, security-jwt, security-ldap, shade, testcontainers, validation ]
diff --git a/src/main/java/com/michelin/ns4kafka/Application.java b/src/main/java/com/michelin/ns4kafka/Application.java
index d95546d4..182a61ec 100644
--- a/src/main/java/com/michelin/ns4kafka/Application.java
+++ b/src/main/java/com/michelin/ns4kafka/Application.java
@@ -9,21 +9,24 @@
import io.swagger.v3.oas.annotations.security.SecurityScheme;
import io.swagger.v3.oas.annotations.tags.Tag;
+/**
+ * Main class to start the application.
+ */
@SecurityScheme(name = "JWT",
- type = SecuritySchemeType.HTTP,
- scheme = "bearer",
- bearerFormat = "JWT")
+ type = SecuritySchemeType.HTTP,
+ scheme = "bearer",
+ bearerFormat = "JWT")
@OpenAPIDefinition(
- security = @SecurityRequirement(name = "JWT"),
- info = @Info(
- title = "Ns4Kafka",
- version = "0.1",
- description = "Getting started with REST APIs."
- )
+ security = @SecurityRequirement(name = "JWT"),
+ info = @Info(
+ title = "Ns4Kafka",
+ version = "0.1",
+ description = "Getting started with REST APIs."
+ )
)
@OpenAPIInclude(
- classes = { io.micronaut.security.endpoints.LoginController.class },
- tags = @Tag(name = "_Security", description = "All the login endpoints.")
+ classes = {io.micronaut.security.endpoints.LoginController.class},
+ tags = @Tag(name = "_Security", description = "All the login endpoints.")
)
public class Application {
diff --git a/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java b/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java
index 84ac9b81..0371a864 100644
--- a/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java
+++ b/src/main/java/com/michelin/ns4kafka/controllers/AkhqClaimProviderController.java
@@ -1,8 +1,8 @@
package com.michelin.ns4kafka.controllers;
-import com.michelin.ns4kafka.config.AkhqClaimProviderControllerConfig;
-import com.michelin.ns4kafka.config.KafkaAsyncExecutorConfig;
import com.michelin.ns4kafka.models.AccessControlEntry;
+import com.michelin.ns4kafka.properties.AkhqProperties;
+import com.michelin.ns4kafka.properties.ManagedClusterProperties;
import com.michelin.ns4kafka.services.AccessControlEntryService;
import com.michelin.ns4kafka.services.NamespaceService;
import io.micronaut.core.annotation.Introspected;
@@ -11,17 +11,25 @@
import io.micronaut.http.annotation.Post;
import io.micronaut.security.rules.SecurityRule;
import io.swagger.v3.oas.annotations.tags.Tag;
-import jakarta.inject.Inject;
-import lombok.Builder;
-import lombok.Data;
-import lombok.Getter;
-
import jakarta.annotation.security.RolesAllowed;
+import jakarta.inject.Inject;
import jakarta.validation.Valid;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
+import lombok.Builder;
+import lombok.Data;
+import lombok.Getter;
+/**
+ * Controller to manage AKHQ claims.
+ */
@Tag(name = "AKHQ", description = "Manage the AKHQ endpoints.")
@RolesAllowed(SecurityRule.IS_ANONYMOUS)
@Controller("/akhq-claim")
@@ -31,7 +39,7 @@ public class AkhqClaimProviderController {
private static final List ADMIN_REGEXP = List.of(".*");
@Inject
- AkhqClaimProviderControllerConfig config;
+ AkhqProperties config;
@Inject
AccessControlEntryService accessControlEntryService;
@@ -40,104 +48,110 @@ public class AkhqClaimProviderController {
NamespaceService namespaceService;
@Inject
- List managedClusters;
+ List managedClusters;
/**
- * List AKHQ claims (v019 and prior)
+ * List AKHQ claims (v019 and prior).
+ *
* @param request The AKHQ request
* @return The AKHQ claims
*/
@Post
- public AKHQClaimResponse generateClaim(@Valid @Body AKHQClaimRequest request) {
+ public AkhqClaimResponse generateClaim(@Valid @Body AkhqClaimRequest request) {
if (request == null) {
- return AKHQClaimResponse.ofEmpty(config.getFormerRoles());
+ return AkhqClaimResponse.ofEmpty(config.getFormerRoles());
}
final List groups = Optional.ofNullable(request.getGroups()).orElse(new ArrayList<>());
if (groups.contains(config.getAdminGroup())) {
- return AKHQClaimResponse.ofAdmin(config.getFormerAdminRoles());
+ return AkhqClaimResponse.ofAdmin(config.getFormerAdminRoles());
}
- List relatedACL = namespaceService.listAll()
- .stream()
- .filter(namespace -> namespace.getMetadata().getLabels() != null &&
- groups.contains(namespace.getMetadata().getLabels().getOrDefault(config.getGroupLabel(), "_")))
- .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace).stream())
- .collect(Collectors.toList());
+ List relatedAcl = namespaceService.listAll()
+ .stream()
+ .filter(namespace -> namespace.getMetadata().getLabels() != null
+ && groups.contains(namespace.getMetadata().getLabels().getOrDefault(config.getGroupLabel(), "_")))
+ .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace).stream())
+ .collect(Collectors.toList());
// Add all public ACLs.
- relatedACL.addAll(accessControlEntryService.findAllPublicGrantedTo());
-
- return AKHQClaimResponse.builder()
- .roles(config.getFormerRoles())
- .attributes(
- Map.of(
- "topicsFilterRegexp", computeAllowedRegexListForResourceType(relatedACL, AccessControlEntry.ResourceType.TOPIC),
- "connectsFilterRegexp", computeAllowedRegexListForResourceType(relatedACL, AccessControlEntry.ResourceType.CONNECT),
- "consumerGroupsFilterRegexp", ADMIN_REGEXP
- )
+ relatedAcl.addAll(accessControlEntryService.findAllPublicGrantedTo());
+
+ return AkhqClaimResponse.builder()
+ .roles(config.getFormerRoles())
+ .attributes(
+ Map.of(
+ "topicsFilterRegexp",
+ computeAllowedRegexListForResourceType(relatedAcl, AccessControlEntry.ResourceType.TOPIC),
+ "connectsFilterRegexp",
+ computeAllowedRegexListForResourceType(relatedAcl, AccessControlEntry.ResourceType.CONNECT),
+ "consumerGroupsFilterRegexp", ADMIN_REGEXP
)
- .build();
+ )
+ .build();
}
/**
- * List AKHQ claims (v020 to 024)
+ * List AKHQ claims (v020 to 024).
+ *
* @param request The AKHQ request
* @return The AKHQ claims
*/
@Post("/v2")
- public AKHQClaimResponseV2 generateClaimV2(@Valid @Body AKHQClaimRequest request) {
+ public AkhqClaimResponseV2 generateClaimV2(@Valid @Body AkhqClaimRequest request) {
if (request == null) {
- return AKHQClaimResponseV2.ofEmpty(config.getFormerRoles());
+ return AkhqClaimResponseV2.ofEmpty(config.getFormerRoles());
}
final List groups = Optional.ofNullable(request.getGroups()).orElse(new ArrayList<>());
if (groups.contains(config.getAdminGroup())) {
- return AKHQClaimResponseV2.ofAdmin(config.getFormerAdminRoles());
+ return AkhqClaimResponseV2.ofAdmin(config.getFormerAdminRoles());
}
- List relatedACL = getAllAclForGroups(groups);
+ List relatedAcl = getAllAclForGroups(groups);
// Add all public ACLs.
- relatedACL.addAll(accessControlEntryService.findAllPublicGrantedTo());
-
- return AKHQClaimResponseV2.builder()
- .roles(config.getFormerRoles())
- .topicsFilterRegexp(computeAllowedRegexListForResourceType(relatedACL, AccessControlEntry.ResourceType.TOPIC))
- .connectsFilterRegexp(computeAllowedRegexListForResourceType(relatedACL, AccessControlEntry.ResourceType.CONNECT))
- .consumerGroupsFilterRegexp(ADMIN_REGEXP)
- .build();
+ relatedAcl.addAll(accessControlEntryService.findAllPublicGrantedTo());
+
+ return AkhqClaimResponseV2.builder()
+ .roles(config.getFormerRoles())
+ .topicsFilterRegexp(
+ computeAllowedRegexListForResourceType(relatedAcl, AccessControlEntry.ResourceType.TOPIC))
+ .connectsFilterRegexp(
+ computeAllowedRegexListForResourceType(relatedAcl, AccessControlEntry.ResourceType.CONNECT))
+ .consumerGroupsFilterRegexp(ADMIN_REGEXP)
+ .build();
}
/**
- * List AKHQ claims (v025 and higher)
+ * List AKHQ claims (v025 and higher).
*
* @param request The AKHQ request
* @return The AKHQ claims
*/
@Post("/v3")
- public AKHQClaimResponseV3 generateClaimV3(@Valid @Body AKHQClaimRequest request) {
+ public AkhqClaimResponseV3 generateClaimV3(@Valid @Body AkhqClaimRequest request) {
final List groups = Optional.ofNullable(request.getGroups()).orElse(new ArrayList<>());
if (groups.contains(config.getAdminGroup())) {
- return AKHQClaimResponseV3.ofAdmin(config.getAdminRoles());
+ return AkhqClaimResponseV3.ofAdmin(config.getAdminRoles());
}
- List relatedACL = getAllAclForGroups(groups);
+ List relatedAcl = getAllAclForGroups(groups);
// Add all public ACLs
- relatedACL.addAll(accessControlEntryService.findAllPublicGrantedTo());
+ relatedAcl.addAll(accessControlEntryService.findAllPublicGrantedTo());
// Remove unnecessary ACLs (project.topic1 when project.* is granted on the same resource type and cluster)
- optimizeACL(relatedACL);
+ optimizeAcl(relatedAcl);
- Map bindings = new LinkedHashMap<>();
+ Map bindings = new LinkedHashMap<>();
// Start by creating a map that store permissions by role/cluster
- relatedACL.forEach(acl -> {
+ relatedAcl.forEach(acl -> {
String escapedString = Pattern.quote(acl.getSpec().getResource());
String patternRegex;
@@ -163,29 +177,51 @@ public AKHQClaimResponseV3 generateClaimV3(@Valid @Body AKHQClaimRequest request
clusters.add(patternCluster);
// Otherwise we add a new one
- bindings.put(key, AKHQClaimResponseV3.Group.builder()
- .role(role)
- .patterns(regexes)
- .clusters(clusters)
- .build());
+ bindings.put(key, AkhqClaimResponseV3.Group.builder()
+ .role(role)
+ .patterns(regexes)
+ .clusters(clusters)
+ .build());
}
});
- List result = optimizeV3Claim(bindings);
+ List result = optimizeV3Claim(bindings);
// Add the same pattern and cluster filtering for SCHEMA as the TOPIC ones
result.addAll(result.stream()
- .filter(g -> g.role.equals(config.getRoles().get(AccessControlEntry.ResourceType.TOPIC)))
- .map(g -> AKHQClaimResponseV3.Group.builder()
- .role(config.getRoles().get(AccessControlEntry.ResourceType.SCHEMA))
- .patterns(g.getPatterns())
- .clusters(g.getClusters())
- .build()
- ).toList());
-
- return AKHQClaimResponseV3.builder()
- .groups(result.isEmpty() ? null : Map.of("group", result))
- .build();
+ .filter(g -> g.role.equals(config.getRoles().get(AccessControlEntry.ResourceType.TOPIC)))
+ .map(g -> AkhqClaimResponseV3.Group.builder()
+ .role(config.getRoles().get(AccessControlEntry.ResourceType.SCHEMA))
+ .patterns(g.getPatterns())
+ .clusters(g.getClusters())
+ .build()
+ ).toList());
+
+ return AkhqClaimResponseV3.builder()
+ .groups(result.isEmpty() ? null : Map.of("group", result))
+ .build();
+ }
+
+ /**
+ * Remove ACLs that are already included by another ACL on the same resource and cluster.
+ * Ex: LITERAL ACL1 with project.topic1 resource + PREFIXED ACL2 with project -> return ACL2 only
+ *
+ * @param acl the input list of acl to optimize
+ */
+ private void optimizeAcl(List acl) {
+ acl.removeIf(accessControlEntry -> acl.stream()
+ // Keep PREFIXED ACL with a different resource but same resource type and cluster
+ .filter(accessControlEntryOther -> accessControlEntryOther.getSpec().getResourcePatternType()
+ .equals(AccessControlEntry.ResourcePatternType.PREFIXED)
+ &&
+ !accessControlEntryOther.getSpec().getResource().equals(accessControlEntry.getSpec().getResource())
+ && accessControlEntryOther.getSpec().getResourceType()
+ .equals(accessControlEntry.getSpec().getResourceType())
+ && accessControlEntryOther.getMetadata().getCluster()
+ .equals(accessControlEntry.getMetadata().getCluster()))
+ .map(accessControlEntryOther -> accessControlEntryOther.getSpec().getResource())
+ // Remove the ACL if there is one that contains the current resource
+ .anyMatch(escapedString -> accessControlEntry.getSpec().getResource().startsWith(escapedString)));
}
/**
@@ -194,8 +230,8 @@ public AKHQClaimResponseV3 generateClaimV3(@Valid @Body AKHQClaimRequest request
* @param bindings - the raw claim
* @return an optimized claim
*/
- private List optimizeV3Claim(Map bindings) {
- List result = new ArrayList<>();
+ private List optimizeV3Claim(Map bindings) {
+ List result = new ArrayList<>();
// Extract the clusters name from the managedClusters configuration
List clusters = managedClusters.stream().map(c -> String.format("^%s$", c.getName())).toList();
@@ -208,173 +244,201 @@ private List optimizeV3Claim(Map result.stream()
- // Search bindings with the same role and cluster filtering
- .filter(r -> r.role.equals(value.role) && r.clusters.size() == value.clusters.size()
- && new HashSet<>(r.clusters).containsAll(value.clusters)
- && new HashSet<>(value.clusters).containsAll(r.clusters))
- .findFirst()
- .ifPresentOrElse(
- // If there is any we can merge the patterns and keep only 1 binding
- toMerge -> toMerge.patterns.addAll(value.getPatterns()),
- // Otherwise we add the current binding
- () -> result.add(value)
- ));
+ // Search bindings with the same role and cluster filtering
+ .filter(r -> r.role.equals(value.role) && r.clusters.size() == value.clusters.size()
+ && new HashSet<>(r.clusters).containsAll(value.clusters)
+ && new HashSet<>(value.clusters).containsAll(r.clusters))
+ .findFirst()
+ .ifPresentOrElse(
+ // If there is any we can merge the patterns and keep only 1 binding
+ toMerge -> toMerge.patterns.addAll(value.getPatterns()),
+ // Otherwise we add the current binding
+ () -> result.add(value)
+ ));
return result;
}
/**
- * List all the ACL for a user based on its LDAP groups
+ * List all the ACL for a user based on its LDAP groups.
+ *
* @param groups the user LDAP groups
* @return the user's ACL
*/
private List getAllAclForGroups(List groups) {
return namespaceService.listAll()
- .stream()
- .filter(namespace -> namespace.getMetadata().getLabels() != null &&
- // Split by comma the groupLabel to support multiple groups and compare with user groups
- !Collections.disjoint(groups,
- List.of(namespace.getMetadata().getLabels()
- .getOrDefault(config.getGroupLabel(), "_")
- .split(","))))
- .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace).stream())
- .collect(Collectors.toList());
+ .stream()
+ .filter(namespace -> namespace.getMetadata().getLabels() != null
+ // Split by comma the groupLabel to support multiple groups and compare with user groups
+ && !Collections.disjoint(groups, List.of(namespace.getMetadata().getLabels()
+ .getOrDefault(config.getGroupLabel(), "_")
+ .split(","))))
+ .flatMap(namespace -> accessControlEntryService.findAllGrantedToNamespace(namespace).stream())
+ .collect(Collectors.toList());
}
/**
- * Compute AKHQ regexes from given ACLs
- * @param acls The ACLs
+ * Compute AKHQ regexes from given ACLs.
+ *
+ * @param acls The ACLs
* @param resourceType The resource type
* @return A list of regex
*/
- public List computeAllowedRegexListForResourceType(List acls, AccessControlEntry.ResourceType resourceType) {
+ public List computeAllowedRegexListForResourceType(List acls,
+ AccessControlEntry.ResourceType resourceType) {
List allowedRegex = acls.stream()
- .filter(accessControlEntry -> accessControlEntry.getSpec().getResourceType() == resourceType)
- .filter(accessControlEntry ->
- acls.stream()
- .filter(accessControlEntryOther -> !accessControlEntryOther.getSpec().getResource().equals(accessControlEntry.getSpec().getResource()))
- .map(accessControlEntryOther -> accessControlEntryOther.getSpec().getResource())
- .noneMatch(escapedString -> accessControlEntry.getSpec().getResource().startsWith(escapedString)))
- .map(accessControlEntry -> {
- String escapedString = Pattern.quote(accessControlEntry.getSpec().getResource());
- if (accessControlEntry.getSpec().getResourcePatternType() == AccessControlEntry.ResourcePatternType.PREFIXED) {
- return String.format("^%s.*$", escapedString);
- } else {
- return String.format("^%s$", escapedString);
- }
- })
- .distinct()
- .toList();
+ .filter(accessControlEntry -> accessControlEntry.getSpec().getResourceType() == resourceType)
+ .filter(accessControlEntry ->
+ acls.stream()
+ .filter(accessControlEntryOther -> !accessControlEntryOther.getSpec().getResource()
+ .equals(accessControlEntry.getSpec().getResource()))
+ .map(accessControlEntryOther -> accessControlEntryOther.getSpec().getResource())
+ .noneMatch(escapedString -> accessControlEntry.getSpec().getResource().startsWith(escapedString)))
+ .map(accessControlEntry -> {
+ String escapedString = Pattern.quote(accessControlEntry.getSpec().getResource());
+ if (accessControlEntry.getSpec().getResourcePatternType()
+ == AccessControlEntry.ResourcePatternType.PREFIXED) {
+ return String.format("^%s.*$", escapedString);
+ } else {
+ return String.format("^%s$", escapedString);
+ }
+ })
+ .distinct()
+ .toList();
//AKHQ considers empty list as "^.*$" so we must return something
return !allowedRegex.isEmpty() ? allowedRegex : EMPTY_REGEXP;
}
/**
- * Remove ACL that are already included by another ACL on the same resource and cluster
- * Ex: LITERAL ACL1 with project.topic1 resource + PREFIXED ACL2 with project -> return ACL2 only
- *
- * @param acl the input list of acl to optimize
+ * AKHQ request.
*/
- private static void optimizeACL(List acl) {
- acl.removeIf(accessControlEntry -> acl.stream()
- // Keep PREFIXED ACL with a different resource but same resource type and cluster
- .filter(accessControlEntryOther ->
- accessControlEntryOther.getSpec().getResourcePatternType().equals(AccessControlEntry.ResourcePatternType.PREFIXED)
- && !accessControlEntryOther.getSpec().getResource().equals(accessControlEntry.getSpec().getResource())
- && accessControlEntryOther.getSpec().getResourceType().equals(accessControlEntry.getSpec().getResourceType())
- && accessControlEntryOther.getMetadata().getCluster().equals(accessControlEntry.getMetadata().getCluster()))
- .map(accessControlEntryOther -> accessControlEntryOther.getSpec().getResource())
- // Remove the ACL if there is one that contains the current resource
- .anyMatch(escapedString -> accessControlEntry.getSpec().getResource().startsWith(escapedString)));
- }
-
@Introspected
@Builder
@Getter
- public static class AKHQClaimRequest {
+ public static class AkhqClaimRequest {
String providerType;
String providerName;
String username;
List groups;
}
+ /**
+ * AKHQ response.
+ */
@Introspected
@Builder
@Getter
- public static class AKHQClaimResponse {
+ public static class AkhqClaimResponse {
private List roles;
private Map> attributes;
- public static AKHQClaimResponse ofEmpty(List roles) {
- return AKHQClaimResponse.builder()
- .roles(roles)
- .attributes(Map.of(
- //AKHQ considers empty list as "^.*$" so we must return something
- "topicsFilterRegexp", EMPTY_REGEXP,
- "connectsFilterRegexp", EMPTY_REGEXP,
- "consumerGroupsFilterRegexp", EMPTY_REGEXP
- ))
- .build();
+ /**
+ * Build an empty AKHQ response.
+ *
+ * @param roles the roles
+ * @return the AKHQ response
+ */
+ public static AkhqClaimResponse ofEmpty(List roles) {
+ return AkhqClaimResponse.builder()
+ .roles(roles)
+ .attributes(Map.of(
+ // AKHQ considers empty list as "^.*$" so we must return something
+ "topicsFilterRegexp", EMPTY_REGEXP,
+ "connectsFilterRegexp", EMPTY_REGEXP,
+ "consumerGroupsFilterRegexp", EMPTY_REGEXP
+ ))
+ .build();
}
- public static AKHQClaimResponse ofAdmin(List roles) {
-
- return AKHQClaimResponse.builder()
- .roles(roles)
- .attributes(Map.of(
- //AKHQ considers empty list as "^.*$" so we must return something
- "topicsFilterRegexp", ADMIN_REGEXP,
- "connectsFilterRegexp", ADMIN_REGEXP,
- "consumerGroupsFilterRegexp", ADMIN_REGEXP
- ))
- .build();
+ /**
+ * Build an AKHQ response for an admin.
+ *
+ * @param roles the roles
+ * @return the AKHQ response
+ */
+ public static AkhqClaimResponse ofAdmin(List roles) {
+ return AkhqClaimResponse.builder()
+ .roles(roles)
+ .attributes(Map.of(
+ // AKHQ considers empty list as "^.*$" so we must return something
+ "topicsFilterRegexp", ADMIN_REGEXP,
+ "connectsFilterRegexp", ADMIN_REGEXP,
+ "consumerGroupsFilterRegexp", ADMIN_REGEXP
+ ))
+ .build();
}
}
+ /**
+ * AKHQ response (v2).
+ */
@Introspected
@Builder
@Getter
- public static class AKHQClaimResponseV2 {
+ public static class AkhqClaimResponseV2 {
private List roles;
private List topicsFilterRegexp;
private List connectsFilterRegexp;
private List consumerGroupsFilterRegexp;
- public static AKHQClaimResponseV2 ofEmpty(List roles) {
- return AKHQClaimResponseV2.builder()
- .roles(roles)
- .topicsFilterRegexp(EMPTY_REGEXP)
- .connectsFilterRegexp(EMPTY_REGEXP)
- .consumerGroupsFilterRegexp(EMPTY_REGEXP)
- .build();
+ /**
+ * Build an empty AKHQ response.
+ *
+ * @param roles the roles
+ * @return the AKHQ response
+ */
+ public static AkhqClaimResponseV2 ofEmpty(List roles) {
+ return AkhqClaimResponseV2.builder()
+ .roles(roles)
+ .topicsFilterRegexp(EMPTY_REGEXP)
+ .connectsFilterRegexp(EMPTY_REGEXP)
+ .consumerGroupsFilterRegexp(EMPTY_REGEXP)
+ .build();
}
- public static AKHQClaimResponseV2 ofAdmin(List roles) {
-
- return AKHQClaimResponseV2.builder()
- .roles(roles)
- .topicsFilterRegexp(ADMIN_REGEXP)
- .connectsFilterRegexp(ADMIN_REGEXP)
- .consumerGroupsFilterRegexp(ADMIN_REGEXP)
- .build();
+ /**
+ * Build an AKHQ response for an admin.
+ *
+ * @param roles the roles
+ * @return the AKHQ response
+ */
+ public static AkhqClaimResponseV2 ofAdmin(List roles) {
+ return AkhqClaimResponseV2.builder()
+ .roles(roles)
+ .topicsFilterRegexp(ADMIN_REGEXP)
+ .connectsFilterRegexp(ADMIN_REGEXP)
+ .consumerGroupsFilterRegexp(ADMIN_REGEXP)
+ .build();
}
}
+ /**
+ * AKHQ response (v3).
+ */
@Introspected
@Builder
@Getter
- public static class AKHQClaimResponseV3 {
+ public static class AkhqClaimResponseV3 {
private Map> groups;
- public static AKHQClaimResponseV3 ofAdmin(Map newAdminRoles) {
- return AKHQClaimResponseV3.builder()
- .groups(Map.of("group",
- newAdminRoles.values().stream()
- .map(r -> Group.builder().role(r).build()).collect(Collectors.toList())))
- .build();
+ /**
+ * Build an AKHQ response for an admin.
+ *
+ * @param newAdminRoles the roles
+ * @return the AKHQ response
+ */
+ public static AkhqClaimResponseV3 ofAdmin(Map newAdminRoles) {
+ return AkhqClaimResponseV3.builder()
+ .groups(Map.of("group",
+ newAdminRoles.values().stream()
+ .map(r -> Group.builder().role(r).build()).collect(Collectors.toList())))
+ .build();
}
+ /**
+ * AKHQ group.
+ */
@Data
@Builder
@Introspected
diff --git a/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java b/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java
index 68d5aca0..3e6520cd 100644
--- a/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java
+++ b/src/main/java/com/michelin/ns4kafka/controllers/ApiResourcesController.java
@@ -10,150 +10,152 @@
import io.micronaut.security.authentication.Authentication;
import io.micronaut.security.rules.SecurityRule;
import io.swagger.v3.oas.annotations.tags.Tag;
+import jakarta.annotation.security.RolesAllowed;
import jakarta.inject.Inject;
+import java.util.Collection;
+import java.util.List;
import lombok.Builder;
import lombok.Getter;
import lombok.Setter;
-import jakarta.annotation.security.RolesAllowed;
-import java.util.Collection;
-import java.util.List;
-import java.util.stream.Collectors;
-
+/**
+ * Controller to manage API resources.
+ */
@Tag(name = "Resources", description = "Manage the API resources.")
@RolesAllowed(SecurityRule.IS_ANONYMOUS)
@Controller("/api-resources")
public class ApiResourcesController {
/**
- * ACL resource definition
+ * ACL resource definition.
*/
public static final ResourceDefinition ACL = ResourceDefinition.builder()
- .kind("AccessControlEntry")
- .namespaced(true)
- .synchronizable(false)
- .path("acls")
- .names(List.of("acls", "acl", "ac"))
- .build();
+ .kind("AccessControlEntry")
+ .namespaced(true)
+ .synchronizable(false)
+ .path("acls")
+ .names(List.of("acls", "acl", "ac"))
+ .build();
/**
- * Connector resource definition
+ * Connector resource definition.
*/
public static final ResourceDefinition CONNECTOR = ResourceDefinition.builder()
- .kind("Connector")
- .namespaced(true)
- .synchronizable(true)
- .path("connectors")
- .names(List.of("connects", "connect", "co"))
- .build();
+ .kind("Connector")
+ .namespaced(true)
+ .synchronizable(true)
+ .path("connectors")
+ .names(List.of("connects", "connect", "co"))
+ .build();
/**
- * Kafka Streams resource definition
+ * Kafka Streams resource definition.
*/
public static final ResourceDefinition KSTREAM = ResourceDefinition.builder()
- .kind("KafkaStream")
- .namespaced(true)
- .synchronizable(false)
- .path("streams")
- .names(List.of("streams", "stream", "st"))
- .build();
+ .kind("KafkaStream")
+ .namespaced(true)
+ .synchronizable(false)
+ .path("streams")
+ .names(List.of("streams", "stream", "st"))
+ .build();
/**
- * Role binding resource definition
+ * Role binding resource definition.
*/
public static final ResourceDefinition ROLE_BINDING = ResourceDefinition.builder()
- .kind("RoleBinding")
- .namespaced(true)
- .synchronizable(false)
- .path("role-bindings")
- .names(List.of("rolebindings", "rolebinding", "rb"))
- .build();
+ .kind("RoleBinding")
+ .namespaced(true)
+ .synchronizable(false)
+ .path("role-bindings")
+ .names(List.of("rolebindings", "rolebinding", "rb"))
+ .build();
/**
- * Topic resource definition
+ * Topic resource definition.
*/
public static final ResourceDefinition TOPIC = ResourceDefinition.builder()
- .kind("Topic")
- .namespaced(true)
- .synchronizable(true)
- .path("topics")
- .names(List.of("topics", "topic", "to"))
- .build();
+ .kind("Topic")
+ .namespaced(true)
+ .synchronizable(true)
+ .path("topics")
+ .names(List.of("topics", "topic", "to"))
+ .build();
/**
- * Schema resource definition
+ * Schema resource definition.
*/
public static final ResourceDefinition SCHEMA = ResourceDefinition.builder()
- .kind("Schema")
- .namespaced(true)
- .synchronizable(false)
- .path("schemas")
- .names(List.of("schemas", "schema", "sc"))
- .build();
+ .kind("Schema")
+ .namespaced(true)
+ .synchronizable(false)
+ .path("schemas")
+ .names(List.of("schemas", "schema", "sc"))
+ .build();
/**
- * Resource quota resource definition
+ * Resource quota resource definition.
*/
public static final ResourceDefinition RESOURCE_QUOTA = ResourceDefinition.builder()
- .kind("ResourceQuota")
- .namespaced(true)
- .synchronizable(false)
- .path("resource-quotas")
- .names(List.of("resource-quotas", "resource-quota", "quotas", "quota", "qu"))
- .build();
+ .kind("ResourceQuota")
+ .namespaced(true)
+ .synchronizable(false)
+ .path("resource-quotas")
+ .names(List.of("resource-quotas", "resource-quota", "quotas", "quota", "qu"))
+ .build();
/**
- * Connect worker resource definition
+ * Connect worker resource definition.
*/
public static final ResourceDefinition CONNECT_CLUSTER = ResourceDefinition.builder()
- .kind("ConnectCluster")
- .namespaced(true)
- .synchronizable(false)
- .path("connect-clusters")
- .names(List.of("connect-clusters", "connect-cluster", "cc"))
- .build();
+ .kind("ConnectCluster")
+ .namespaced(true)
+ .synchronizable(false)
+ .path("connect-clusters")
+ .names(List.of("connect-clusters", "connect-cluster", "cc"))
+ .build();
/**
- * Namespace resource definition
+ * Namespace resource definition.
*/
public static final ResourceDefinition NAMESPACE = ResourceDefinition.builder()
- .kind("Namespace")
- .namespaced(false)
- .synchronizable(false)
- .path("namespaces")
- .names(List.of("namespaces", "namespace", "ns"))
- .build();
+ .kind("Namespace")
+ .namespaced(false)
+ .synchronizable(false)
+ .path("namespaces")
+ .names(List.of("namespaces", "namespace", "ns"))
+ .build();
/**
- * Role binding repository
+ * Role binding repository.
*/
@Inject
RoleBindingRepository roleBindingRepository;
/**
- * List API resources
+ * List API resources.
+ *
* @param authentication The authentication
* @return The list of API resources
*/
@Get
public List list(@Nullable Authentication authentication) {
List all = List.of(
- ACL,
- CONNECTOR,
- KSTREAM,
- ROLE_BINDING,
- RESOURCE_QUOTA,
- CONNECT_CLUSTER,
- TOPIC,
- NAMESPACE,
- SCHEMA
+ ACL,
+ CONNECTOR,
+ KSTREAM,
+ ROLE_BINDING,
+ RESOURCE_QUOTA,
+ CONNECT_CLUSTER,
+ TOPIC,
+ NAMESPACE,
+ SCHEMA
);
if (authentication == null) {
return all; // Backward compatibility for cli <= 1.3.0
}
- List roles = (List)authentication.getAttributes().getOrDefault("roles", List.of());
- List groups = (List) authentication.getAttributes().getOrDefault("groups",List.of());
+ List roles = (List) authentication.getAttributes().getOrDefault("roles", List.of());
+ List groups = (List) authentication.getAttributes().getOrDefault("groups", List.of());
if (roles.contains(ResourceBasedSecurityRule.IS_ADMIN)) {
return all;
@@ -161,15 +163,18 @@ public List list(@Nullable Authentication authentication) {
Collection roleBindings = roleBindingRepository.findAllForGroups(groups);
List authorizedResources = roleBindings.stream()
- .flatMap(roleBinding -> roleBinding.getSpec().getRole().getResourceTypes().stream())
- .distinct()
- .toList();
+ .flatMap(roleBinding -> roleBinding.getSpec().getRole().getResourceTypes().stream())
+ .distinct()
+ .toList();
return all.stream()
- .filter(resourceDefinition -> authorizedResources.contains(resourceDefinition.getPath()))
- .toList();
+ .filter(resourceDefinition -> authorizedResources.contains(resourceDefinition.getPath()))
+ .toList();
}
+ /**
+ * API resource definition.
+ */
@Introspected
@Builder
@Getter
diff --git a/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java b/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java
index 336c5218..24c3864f 100644
--- a/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java
+++ b/src/main/java/com/michelin/ns4kafka/controllers/ConnectorController.java
@@ -11,20 +11,28 @@
import io.micronaut.http.HttpResponse;
import io.micronaut.http.HttpStatus;
import io.micronaut.http.MutableHttpResponse;
-import io.micronaut.http.annotation.*;
+import io.micronaut.http.annotation.Body;
+import io.micronaut.http.annotation.Controller;
+import io.micronaut.http.annotation.Delete;
+import io.micronaut.http.annotation.Get;
+import io.micronaut.http.annotation.Post;
+import io.micronaut.http.annotation.QueryValue;
+import io.micronaut.http.annotation.Status;
import io.micronaut.scheduling.TaskExecutors;
import io.micronaut.scheduling.annotation.ExecuteOn;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.inject.Inject;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
import jakarta.validation.Valid;
import java.time.Instant;
import java.util.Date;
import java.util.List;
import java.util.Optional;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+/**
+ * Controller to manage connectors.
+ */
@Tag(name = "Connectors", description = "Manage the connectors.")
@Controller(value = "/api/namespaces/{namespace}/connectors")
@ExecuteOn(TaskExecutors.IO)
@@ -38,7 +46,8 @@ public class ConnectorController extends NamespacedResourceController {
ResourceQuotaService resourceQuotaService;
/**
- * List connectors by namespace
+ * List connectors by namespace.
+ *
* @param namespace The namespace
* @return A list of connectors
*/
@@ -48,7 +57,8 @@ public List list(String namespace) {
}
/**
- * Get a connector by namespace and name
+ * Get a connector by namespace and name.
+ *
* @param namespace The namespace
* @param connector The name
* @return A connector
@@ -59,21 +69,23 @@ public Optional getConnector(String namespace, String connector) {
}
/**
- * Delete a connector
+ * Delete a connector.
+ *
* @param namespace The current namespace
* @param connector The current connector name to delete
- * @param dryrun Run in dry mode or not
+ * @param dryrun Run in dry mode or not
* @return A HTTP response
*/
@Status(HttpStatus.NO_CONTENT)
@Delete("/{connector}{?dryrun}")
- public Mono> deleteConnector(String namespace, String connector, @QueryValue(defaultValue = "false") boolean dryrun) {
+ public Mono> deleteConnector(String namespace, String connector,
+ @QueryValue(defaultValue = "false") boolean dryrun) {
Namespace ns = getNamespace(namespace);
// Validate ownership
if (!connectorService.isNamespaceOwnerOfConnect(ns, connector)) {
return Mono.error(new ResourceValidationException(List.of(String.format(NAMESPACE_NOT_OWNER, connector)),
- "Connector", connector));
+ "Connector", connector));
}
Optional optionalConnector = connectorService.findByName(ns, connector);
@@ -87,31 +99,34 @@ public Mono> deleteConnector(String namespace, String connect
Connector connectorToDelete = optionalConnector.get();
sendEventLog(connectorToDelete.getKind(),
- connectorToDelete.getMetadata(),
- ApplyStatus.deleted,
- connectorToDelete.getSpec(),
- null);
+ connectorToDelete.getMetadata(),
+ ApplyStatus.deleted,
+ connectorToDelete.getSpec(),
+ null);
return connectorService
- .delete(ns, optionalConnector.get())
- .map(httpResponse -> HttpResponse.noContent());
+ .delete(ns, optionalConnector.get())
+ .map(httpResponse -> HttpResponse.noContent());
}
/**
- * Create a connector
+ * Create a connector.
+ *
* @param namespace The namespace
- * @param connector The connector to create
- * @param dryrun Does the creation is a dry run
+ * @param connector The connector to create
+ * @param dryrun    Whether the creation is a dry run
* @return The created connector
*/
@Post("{?dryrun}")
- public Mono> apply(String namespace, @Valid @Body Connector connector, @QueryValue(defaultValue = "false") boolean dryrun) {
+ public Mono> apply(String namespace, @Valid @Body Connector connector,
+ @QueryValue(defaultValue = "false") boolean dryrun) {
Namespace ns = getNamespace(namespace);
// Validate ownership
if (!connectorService.isNamespaceOwnerOfConnect(ns, connector.getMetadata().getName())) {
- return Mono.error(new ResourceValidationException(List.of(String.format(NAMESPACE_NOT_OWNER, connector.getMetadata().getName())),
- connector.getKind(), connector.getMetadata().getName()));
+ return Mono.error(new ResourceValidationException(
+ List.of(String.format(NAMESPACE_NOT_OWNER, connector.getMetadata().getName())),
+ connector.getKind(), connector.getMetadata().getName()));
}
// Set / Override name in spec.config.name, required for several Kafka Connect API calls
@@ -126,67 +141,74 @@ public Mono> apply(String namespace, @Valid @Body Connec
// Validate locally
return connectorService.validateLocally(ns, connector)
- .flatMap(validationErrors -> {
- if (!validationErrors.isEmpty()) {
- return Mono.error(new ResourceValidationException(validationErrors, connector.getKind(), connector.getMetadata().getName()));
- }
-
- // Validate against connect rest API /validate
- return connectorService.validateRemotely(ns, connector)
- .flatMap(remoteValidationErrors -> {
- if (!remoteValidationErrors.isEmpty()) {
- return Mono.error(new ResourceValidationException(remoteValidationErrors, connector.getKind(), connector.getMetadata().getName()));
- }
-
- // Augment with server side fields
- connector.getMetadata().setCreationTimestamp(Date.from(Instant.now()));
- connector.getMetadata().setCluster(ns.getMetadata().getCluster());
- connector.getMetadata().setNamespace(ns.getMetadata().getName());
- connector.setStatus(Connector.ConnectorStatus.builder()
- .state(Connector.TaskState.UNASSIGNED)
- .build());
-
- Optional existingConnector = connectorService.findByName(ns, connector.getMetadata().getName());
- if (existingConnector.isPresent() && existingConnector.get().equals(connector)) {
- return Mono.just(formatHttpResponse(existingConnector.get(), ApplyStatus.unchanged));
- }
-
- ApplyStatus status = existingConnector.isPresent() ? ApplyStatus.changed : ApplyStatus.created;
-
- // Only check quota on connector creation
- if (status.equals(ApplyStatus.created)) {
- List quotaErrors = resourceQuotaService.validateConnectorQuota(ns);
- if (!quotaErrors.isEmpty()) {
- return Mono.error(new ResourceValidationException(quotaErrors, connector.getKind(), connector.getMetadata().getName()));
- }
- }
-
- if (dryrun) {
- return Mono.just(formatHttpResponse(connector, status));
- }
-
- sendEventLog(connector.getKind(), connector.getMetadata(), status,
- existingConnector.