diff --git a/.env b/.env
new file mode 100644
index 000000000..6c15939f8
--- /dev/null
+++ b/.env
@@ -0,0 +1,24 @@
+COMPOSE_FILE="compose.yml:compose.kafka-apache.yml"
+# COMPOSE_FILE="compose.yml:compose.kafka-redpanda.yml"
+# COMPOSE_FILE="compose.yml:compose.kafka-tansu.yml"
+# COMPOSE_FILE="compose.yml:compose.kafka-apache.yml:compose.monitoring.yml"
+
+# Database connection details.
+DB_USERNAME=dtrack
+DB_PASSWORD=dtrack
+DB_NAME=dtrack
+
+# Dependency-Track image tags.
+HYADES_TAG="snapshot-native"
+HYADES_APISERVER_TAG="snapshot"
+HYADES_FRONTEND_TAG="snapshot"
+
+# Infrastructure image tags.
+# Which ones are relevant depends on the compose files being used (see COMPOSE_FILE).
+KAFKA_APACHE_TAG="3.8.0"
+KAFKA_REDPANDA_TAG="v24.2.2"
+KAFKA_TANSU_TAG="latest"
+POSTGRES_TAG="17-alpine"
+
+# Tansu configuration.
+KAFKA_TANSU_DB_NAME="tansu"
\ No newline at end of file
diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml
index 8c1992fdf..b6b01e917 100644
--- a/.github/workflows/e2e-test.yml
+++ b/.github/workflows/e2e-test.yml
@@ -27,6 +27,14 @@ on:
hyades-version:
description: Version of Hyades services to test against
default: snapshot
+ kafka-provider:
+ description: The Kafka provider to test against
+ type: choice
+ options:
+ - APACHE
+ - APACHE_NATIVE
+ - REDPANDA
+ - TANSU
permissions: { }
@@ -45,8 +53,9 @@ jobs:
cache: maven
- name: Test
env:
- APISERVER_VERSION: ${{ inputs.apiserver-version || 'snapshot' }}
+ HYADES_APISERVER_VERSION: ${{ inputs.apiserver-version || 'snapshot' }}
HYADES_VERSION: ${{ inputs.hyades-version || 'snapshot' }}
+ KAFKA_PROVIDER: ${{ inputs.kafka-provider || 'APACHE_NATIVE' }}
OSSINDEX_USERNAME: ${{ secrets.OSSINDEX_USERNAME }}
OSSINDEX_TOKEN: ${{ secrets.OSSINDEX_TOKEN }}
run: mvn -B --no-transfer-progress -pl e2e clean verify -Pe2e-all
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index bdf800ab5..fe1aafde9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,7 +39,7 @@ nb-configuration.xml
*.rej
# Local environment
-.env
+!.env
# Secret key
secret.key
diff --git a/compose.kafka-apache.yml b/compose.kafka-apache.yml
new file mode 100644
index 000000000..34b37f4c3
--- /dev/null
+++ b/compose.kafka-apache.yml
@@ -0,0 +1,47 @@
+# This file is part of Dependency-Track.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) OWASP Foundation. All Rights Reserved.
+---
+services:
+ kafka:
+ image: "apache/kafka-native:${KAFKA_APACHE_TAG}"
+ environment:
+ CLUSTER_ID: 'RvQwrYegSUCkIPkaiAZQlQ'
+ KAFKA_NODE_ID: "1"
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT_HOST://localhost:9092,PLAINTEXT://kafka:29092'
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+ KAFKA_LISTENERS: 'CONTROLLER://:29093,PLAINTEXT_HOST://:9092,PLAINTEXT://:29092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ healthcheck:
+ test: [ "CMD", "nc", "-z", "kafka", "9092"]
+ interval: 5s
+ timeout: 3s
+ retries: 3
+ ports:
+ - "9092:9092"
+ volumes:
+ - "kafka-data:/var/lib/kafka/data"
+ restart: unless-stopped
+
+volumes:
+ kafka-data: { }
\ No newline at end of file
diff --git a/compose.kafka-redpanda.yml b/compose.kafka-redpanda.yml
new file mode 100644
index 000000000..4f8c63152
--- /dev/null
+++ b/compose.kafka-redpanda.yml
@@ -0,0 +1,49 @@
+# This file is part of Dependency-Track.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) OWASP Foundation. All Rights Reserved.
+---
+services:
+ kafka:
+ image: "docker.redpanda.com/vectorized/redpanda:${KAFKA_REDPANDA_TAG}"
+ command:
+ - redpanda
+ - start
+ - --smp
+ - '1'
+ - --reserve-memory
+ - 0M
+ - --memory
+ - 512M
+ - --overprovisioned
+ - --node-id
+ - '0'
+ - --kafka-addr
+ - PLAINTEXT://0.0.0.0:29092,OUTSIDE://0.0.0.0:9092
+ - --advertise-kafka-addr
+ - PLAINTEXT://kafka:29092,OUTSIDE://localhost:9092
+ healthcheck:
+      test: [ "CMD-SHELL", "rpk cluster health --exit-when-healthy" ]
+ interval: 5s
+ timeout: 3s
+ retries: 3
+ ports:
+ - "9092:9092"
+ volumes:
+ - "redpanda-data:/var/lib/redpanda/data"
+ restart: unless-stopped
+
+volumes:
+ redpanda-data: { }
\ No newline at end of file
diff --git a/compose.kafka-tansu.yml b/compose.kafka-tansu.yml
new file mode 100644
index 000000000..21957c332
--- /dev/null
+++ b/compose.kafka-tansu.yml
@@ -0,0 +1,61 @@
+# This file is part of Dependency-Track.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) OWASP Foundation. All Rights Reserved.
+---
+services:
+ tansu-init:
+ image: "postgres:${POSTGRES_TAG}"
+ entrypoint: /bin/bash
+ command: >-
+ -c '((psql -d ${KAFKA_TANSU_DB_NAME} -q -c "select 1 from cluster limit 1" >/dev/null 2>/dev/null) && echo "Already initialized")
+      || ((psql -c "CREATE DATABASE ${KAFKA_TANSU_DB_NAME}") && (wget -q -O- https://raw.githubusercontent.com/tansu-io/tansu/refs/heads/main/work-dir/initdb.d/010-schema.sql | psql -d ${KAFKA_TANSU_DB_NAME}))'
+ depends_on:
+ postgres:
+ condition: service_healthy
+ environment:
+ PGHOST: "postgres"
+ PGUSER: "${DB_USERNAME}"
+ PGPASSWORD: "${DB_PASSWORD}"
+ restart: on-failure
+
+ kafka:
+ image: "ghcr.io/tansu-io/tansu:${KAFKA_TANSU_TAG}"
+ command: >-
+ --kafka-cluster-id RvQwrYegSUCkIPkaiAZQlQ
+ --kafka-node-id 1
+ --kafka-listener-url tcp://0.0.0.0:29092
+ --kafka-advertised-listener-url tcp://kafka:29092
+ --storage-engine pg=postgres://${DB_USERNAME}:${DB_PASSWORD}@postgres/${KAFKA_TANSU_DB_NAME}
+ --work-dir /data
+ depends_on:
+ postgres:
+ condition: service_healthy
+ tansu-init:
+ condition: service_completed_successfully
+ environment:
+ RUST_BACKTRACE: "1"
+ RUST_LOG: "info"
+ healthcheck:
+ test: [ "CMD", "/tansu-server", "-h" ]
+ interval: 5s
+ timeout: 3s
+ retries: 3
+ volumes:
+ - "tansu-data:/data"
+ restart: unless-stopped
+
+volumes:
+ tansu-data: { }
\ No newline at end of file
diff --git a/compose.monitoring.yml b/compose.monitoring.yml
new file mode 100644
index 000000000..dc0070d79
--- /dev/null
+++ b/compose.monitoring.yml
@@ -0,0 +1,61 @@
+# This file is part of Dependency-Track.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) OWASP Foundation. All Rights Reserved.
+---
+services:
+ redpanda-console:
+ image: docker.redpanda.com/vectorized/console:v2.7.0
+ depends_on:
+ kafka:
+ condition: service_healthy
+ environment:
+ CONFIG_FILEPATH: "/tmp/config.yml"
+ ports:
+ - "127.0.0.1:28080:8080"
+ volumes:
+ - "./monitoring/redpanda-console:/tmp/config.yml:ro"
+ - "./proto/src/main/proto:/etc/protos:ro"
+ restart: unless-stopped
+
+ prometheus:
+ image: prom/prometheus:v2.53.1
+ container_name: dt-prometheus
+ ports:
+ - "127.0.0.1:9090:9090"
+ volumes:
+ - "./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro"
+ - "prometheus-data:/prometheus"
+ restart: unless-stopped
+
+ grafana:
+ image: grafana/grafana-oss:11.1.3
+ container_name: dt-grafana
+ depends_on:
+ - prometheus
+ environment:
+ GF_SECURITY_ADMIN_USER: "admin"
+ GF_SECURITY_ADMIN_PASSWORD: "admin"
+ ports:
+ - "127.0.0.1:3000:3000"
+ volumes:
+ - "grafana-data:/var/lib/grafana"
+ - "./monitoring/grafana/dashboards:/etc/dashboards:ro"
+ - "./monitoring/grafana/provisioning:/etc/grafana/provisioning:ro"
+ restart: unless-stopped
+
+volumes:
+ grafana-data: { }
+ prometheus-data: { }
\ No newline at end of file
diff --git a/compose.yml b/compose.yml
new file mode 100644
index 000000000..6fb947d6e
--- /dev/null
+++ b/compose.yml
@@ -0,0 +1,191 @@
+# This file is part of Dependency-Track.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) OWASP Foundation. All Rights Reserved.
+---
+x-db-connection-config: &dbConnectionCfg
+ # For API server and initializer.
+ ALPINE_DATABASE_URL: "jdbc:postgresql://postgres:5432/${DB_NAME}?reWriteBatchedInserts=true"
+ ALPINE_DATABASE_USERNAME: "${DB_USERNAME}"
+ ALPINE_DATABASE_PASSWORD: "${DB_PASSWORD}"
+ # For Quarkus-based services.
+ QUARKUS_DATASOURCE_JDBC_URL: "jdbc:postgresql://postgres:5432/${DB_NAME}"
+ QUARKUS_DATASOURCE_USERNAME: "${DB_USERNAME}"
+ QUARKUS_DATASOURCE_PASSWORD: "${DB_PASSWORD}"
+ # For PostgreSQL itself.
+ POSTGRES_DB: "${DB_NAME}"
+ POSTGRES_USER: "${DB_USERNAME}"
+ POSTGRES_PASSWORD: "${DB_PASSWORD}"
+x-kafka-connection-config: &kafkaConnectionCfg
+ KAFKA_BOOTSTRAP_SERVERS: "kafka:29092"
+x-secret-key-config: &secretKeyCfg
+ # For API server.
+ ALPINE_SECRET_KEY_PATH: "/var/run/secrets/.dependency-track/keys/secret.key"
+ # For Quarkus-based services.
+ SECRET_KEY_PATH: "/var/run/secrets/.dependency-track/keys/secret.key"
+
+services:
+ initializer:
+ image: "ghcr.io/dependencytrack/hyades-apiserver:${HYADES_APISERVER_TAG}"
+ depends_on:
+ postgres:
+ condition: service_healthy
+ environment:
+ <<: [ *dbConnectionCfg, *kafkaConnectionCfg ]
+ JAVA_OPTIONS: "-Xmx256m -XX:+UseSerialGC -XX:TieredStopAtLevel=1"
+ ALPINE_DATABASE_POOL_ENABLED: "false"
+ INIT_TASKS_ENABLED: "true"
+ INIT_TASKS_KAFKA_TOPICS_ENABLED: "true"
+ INIT_AND_EXIT: "true"
+ volumes:
+ - "secret-data:/data"
+ restart: on-failure
+
+ apiserver:
+ image: "ghcr.io/dependencytrack/hyades-apiserver:${HYADES_APISERVER_TAG}"
+ depends_on:
+ postgres:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ initializer:
+ condition: service_completed_successfully
+ environment:
+ <<: [ *dbConnectionCfg, *kafkaConnectionCfg, *secretKeyCfg ]
+ # Limit maximum heap size to 1GB.
+ # Default would be 80% of available memory,
+ # which can cause problems on some workstations.
+ # For production deployments, the default should be used.
+ EXTRA_JAVA_OPTIONS: "-Xmx1g"
+ ALPINE_METRICS_ENABLED: "true"
+ INTEGRITY_CHECK_ENABLED: "true"
+ INIT_TASKS_ENABLED: "false"
+ ports:
+ - "127.0.0.1:8080:8080"
+ volumes:
+ - "apiserver-data:/data"
+ - "secret-data:/var/run/secrets:ro"
+ profiles:
+ - demo
+ restart: unless-stopped
+
+ frontend:
+ image: "ghcr.io/dependencytrack/hyades-frontend:${HYADES_FRONTEND_TAG}"
+ environment:
+ API_BASE_URL: "http://localhost:8080"
+ ports:
+ - "127.0.0.1:8081:8080"
+ profiles:
+ - demo
+ restart: unless-stopped
+
+ notification-publisher:
+ image: "ghcr.io/dependencytrack/hyades-notification-publisher:${HYADES_TAG}"
+ depends_on:
+ postgres:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ initializer:
+ condition: service_completed_successfully
+ environment:
+ << : [ *dbConnectionCfg, *kafkaConnectionCfg, *secretKeyCfg ]
+ profiles:
+ - demo
+ volumes:
+ - "secret-data:/var/run/secrets:ro"
+ restart: unless-stopped
+
+ repo-meta-analyzer:
+ image: "ghcr.io/dependencytrack/hyades-repository-meta-analyzer:${HYADES_TAG}"
+ depends_on:
+ postgres:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ initializer:
+ condition: service_completed_successfully
+ environment:
+ << : [ *dbConnectionCfg, *kafkaConnectionCfg, *secretKeyCfg ]
+ KAFKA_STREAMS_NUM_STREAM_THREADS: "6" # Default number of input partitions is 6
+ profiles:
+ - demo
+ volumes:
+ - "secret-data:/var/run/secrets:ro"
+ restart: unless-stopped
+
+ vuln-analyzer:
+ image: "ghcr.io/dependencytrack/hyades-vulnerability-analyzer:${HYADES_TAG}"
+ depends_on:
+ postgres:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ initializer:
+ condition: service_completed_successfully
+ environment:
+ << : [ *dbConnectionCfg, *kafkaConnectionCfg ]
+ KAFKA_STREAMS_NUM_STREAM_THREADS: "12" # Default number of input partitions is 12
+ SCANNER_INTERNAL_ENABLED: "true"
+ # SCANNER_INTERNAL_TOPIC_PARTITIONS: "3"
+ SCANNER_OSSINDEX_ENABLED: "true"
+ # SCANNER_OSSINDEX_TOPIC_PARTITIONS: "3"
+ # SCANNER_OSSINDEX_API_USERNAME: "email@example.com"
+ # SCANNER_OSSINDEX_API_TOKEN: "your-token"
+ # SCANNER_SNYK_ENABLED: "true"
+ # SCANNER_SNYK_TOPIC_PARTITIONS: "3"
+ # SCANNER_SNYK_API_ORG_ID: "your-org-id"
+ # SCANNER_SNYK_API_TOKENS: "your-token-1,your-token-2"
+ profiles:
+ - demo
+ restart: unless-stopped
+
+ mirror-service:
+ image: "ghcr.io/dependencytrack/hyades-mirror-service:${HYADES_TAG}"
+ depends_on:
+ postgres:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ initializer:
+ condition: service_completed_successfully
+ environment:
+ << : [ *dbConnectionCfg, *kafkaConnectionCfg, *secretKeyCfg ]
+ KAFKA_STREAMS_NUM_STREAM_THREADS: "3"
+ profiles:
+ - demo
+ volumes:
+ - "secret-data:/var/run/secrets:ro"
+ restart: unless-stopped
+
+ postgres:
+ image: "postgres:${POSTGRES_TAG}"
+ environment:
+ << : *dbConnectionCfg
+ healthcheck:
+ test: [ "CMD-SHELL", "pg_isready -U ${DB_USERNAME} -d ${DB_NAME}" ]
+ interval: 5s
+ timeout: 3s
+ retries: 3
+ ports:
+ - "5432:5432"
+ volumes:
+ - "postgres-data:/var/lib/postgresql/data"
+ restart: unless-stopped
+
+volumes:
+ apiserver-data: { }
+ secret-data: { }
+ postgres-data: { }
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index 123a440f9..000000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,395 +0,0 @@
-# This file is part of Dependency-Track.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# Copyright (c) OWASP Foundation. All Rights Reserved.
-services:
- notification-publisher:
- image: ghcr.io/dependencytrack/hyades-notification-publisher:snapshot-native
- depends_on:
- postgres:
- condition: service_healthy
- redpanda:
- condition: service_healthy
- initializer:
- condition: service_completed_successfully
- secret-init:
- condition: service_completed_successfully
- environment:
- KAFKA_BOOTSTRAP_SERVERS: "dt-redpanda:29092"
- QUARKUS_DATASOURCE_JDBC_URL: "jdbc:postgresql://dt-postgres:5432/dtrack"
- QUARKUS_DATASOURCE_USERNAME: "dtrack"
- QUARKUS_DATASOURCE_PASSWORD: "dtrack"
- SECRET_KEY_PATH: "/var/run/secrets/secret.key"
- ports:
- # Dynamic host port binding to allow for scaling of the service.
- # Scaling with Compose doesn't work when assigning static host ports.
- - "8090"
- profiles:
- - demo
- volumes:
- - "secret-data:/var/run/secrets:ro"
- restart: unless-stopped
-
- repo-meta-analyzer:
- image: ghcr.io/dependencytrack/hyades-repository-meta-analyzer:snapshot-native
- depends_on:
- postgres:
- condition: service_healthy
- redpanda:
- condition: service_healthy
- initializer:
- condition: service_completed_successfully
- secret-init:
- condition: service_completed_successfully
- environment:
- KAFKA_BOOTSTRAP_SERVERS: "dt-redpanda:29092"
- KAFKA_STREAMS_NUM_STREAM_THREADS: "6" # Default number of input partitions is 6
- QUARKUS_DATASOURCE_JDBC_URL: "jdbc:postgresql://dt-postgres:5432/dtrack"
- QUARKUS_DATASOURCE_USERNAME: "dtrack"
- QUARKUS_DATASOURCE_PASSWORD: "dtrack"
- SECRET_KEY_PATH: "/var/run/secrets/secret.key"
- ports:
- # Dynamic host port binding to allow for scaling of the service.
- # Scaling with Compose doesn't work when assigning static host ports.
- - "8091"
- profiles:
- - demo
- volumes:
- - "secret-data:/var/run/secrets:ro"
- restart: unless-stopped
-
- vuln-analyzer:
- image: ghcr.io/dependencytrack/hyades-vulnerability-analyzer:snapshot-native
- depends_on:
- postgres:
- condition: service_healthy
- redpanda:
- condition: service_healthy
- initializer:
- condition: service_completed_successfully
- environment:
- KAFKA_BOOTSTRAP_SERVERS: "dt-redpanda:29092"
- KAFKA_STREAMS_NUM_STREAM_THREADS: "12" # Default number of input partitions is 12
- QUARKUS_DATASOURCE_JDBC_URL: "jdbc:postgresql://dt-postgres:5432/dtrack"
- QUARKUS_DATASOURCE_USERNAME: "dtrack"
- QUARKUS_DATASOURCE_PASSWORD: "dtrack"
- SCANNER_INTERNAL_ENABLED: "true"
- # SCANNER_INTERNAL_TOPIC_PARTITIONS: "3"
- SCANNER_OSSINDEX_ENABLED: "true"
- # SCANNER_OSSINDEX_TOPIC_PARTITIONS: "3"
- # SCANNER_OSSINDEX_API_USERNAME: "email@example.com"
- # SCANNER_OSSINDEX_API_TOKEN: "your-token"
- # SCANNER_SNYK_ENABLED: "true"
- # SCANNER_SNYK_TOPIC_PARTITIONS: "3"
- # SCANNER_SNYK_API_ORG_ID: "your-org-id"
- # SCANNER_SNYK_API_TOKENS: "your-token-1,your-token-2"
- ports:
- # Dynamic host port binding to allow for scaling of the service.
- # Scaling with Compose doesn't work when assigning static host ports.
- - "8092"
- profiles:
- - demo
- restart: unless-stopped
-
- mirror-service:
- image: ghcr.io/dependencytrack/hyades-mirror-service:snapshot-native
- depends_on:
- postgres:
- condition: service_healthy
- redpanda:
- condition: service_healthy
- initializer:
- condition: service_completed_successfully
- secret-init:
- condition: service_completed_successfully
- environment:
- KAFKA_BOOTSTRAP_SERVERS: "dt-redpanda:29092"
- KAFKA_STREAMS_NUM_STREAM_THREADS: "3"
- QUARKUS_DATASOURCE_JDBC_URL: "jdbc:postgresql://dt-postgres:5432/dtrack"
- QUARKUS_DATASOURCE_USERNAME: "dtrack"
- QUARKUS_DATASOURCE_PASSWORD: "dtrack"
- SECRET_KEY_PATH: "/var/run/secrets/secret.key"
- ports:
- # Dynamic host port binding to allow for scaling of the service.
- # Scaling with Compose doesn't work when assigning static host ports.
- - "8093"
- profiles:
- - demo
- volumes:
- - "secret-data:/var/run/secrets:ro"
- restart: unless-stopped
-
- initializer:
- image: ghcr.io/dependencytrack/hyades-apiserver:snapshot
- container_name: dt-initializer
- depends_on:
- postgres:
- condition: service_healthy
- environment:
- EXTRA_JAVA_OPTIONS: "-Xmx256m"
- ALPINE_DATABASE_URL: "jdbc:postgresql://dt-postgres:5432/dtrack"
- ALPINE_DATABASE_USERNAME: "dtrack"
- ALPINE_DATABASE_PASSWORD: "dtrack"
- ALPINE_DATABASE_POOL_ENABLED: "false"
- INIT_TASKS_ENABLED: "true"
- INIT_AND_EXIT: "true"
- restart: on-failure
-
- apiserver:
- image: ghcr.io/dependencytrack/hyades-apiserver:snapshot
- container_name: dt-apiserver
- depends_on:
- postgres:
- condition: service_healthy
- redpanda:
- condition: service_healthy
- initializer:
- condition: service_completed_successfully
- secret-init:
- condition: service_completed_successfully
- environment:
- # Limit maximum heap size to 2GB.
- # Default would be 90% of available memory,
- # which can cause problems on some workstations.
- # For production deployments, the default should be used.
- EXTRA_JAVA_OPTIONS: "-Xmx2g"
- ALPINE_DATABASE_URL: "jdbc:postgresql://dt-postgres:5432/dtrack?reWriteBatchedInserts=true"
- ALPINE_DATABASE_USERNAME: "dtrack"
- ALPINE_DATABASE_PASSWORD: "dtrack"
- ALPINE_METRICS_ENABLED: "true"
- ALPINE_SECRET_KEY_PATH: "/var/run/secrets/secret.key"
- KAFKA_BOOTSTRAP_SERVERS: "dt-redpanda:29092"
- INTEGRITY_CHECK_ENABLED: "true"
- INIT_TASKS_ENABLED: "false"
- ports:
- - "127.0.0.1:8080:8080"
- volumes:
- - "apiserver-data:/data"
- - "secret-data:/var/run/secrets:ro"
- profiles:
- - demo
- restart: unless-stopped
-
- frontend:
- image: ghcr.io/dependencytrack/hyades-frontend:snapshot
- container_name: dt-frontend
- environment:
- API_BASE_URL: "http://localhost:8080"
- ports:
- - "127.0.0.1:8081:8080"
- profiles:
- - demo
- restart: unless-stopped
-
- postgres:
- image: postgres:16-alpine
- container_name: dt-postgres
- environment:
- POSTGRES_DB: "dtrack"
- POSTGRES_USER: "dtrack"
- POSTGRES_PASSWORD: "dtrack"
- healthcheck:
- test: [ "CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}" ]
- interval: 5s
- timeout: 3s
- retries: 3
- ports:
- - "5432:5432"
- volumes:
- - "postgres-data:/var/lib/postgresql/data"
- restart: unless-stopped
-
- redpanda:
- image: docker.redpanda.com/vectorized/redpanda:v24.2.2
- container_name: dt-redpanda
- command:
- - redpanda
- - start
- - --smp
- - '1'
- - --reserve-memory
- - 0M
- - --memory
- - 512M
- - --overprovisioned
- - --node-id
- - '0'
- - --kafka-addr
- - PLAINTEXT://0.0.0.0:29092,OUTSIDE://0.0.0.0:9092,MINIKUBE://0.0.0.0:9093
- - --advertise-kafka-addr
- - PLAINTEXT://dt-redpanda:29092,OUTSIDE://localhost:9092,MINIKUBE://host.minikube.internal:9093
- - --pandaproxy-addr
- - PLAINTEXT://0.0.0.0:28082,OUTSIDE://0.0.0.0:8082
- - --advertise-pandaproxy-addr
- - PLAINTEXT://dt-redpanda:28082,OUTSIDE://localhost:8082
- healthcheck:
- test: [ "CMD-SHELL", "rpk", "cluster", "health", "--exit-when-healthy" ]
- interval: 5s
- timeout: 3s
- retries: 3
- ports:
- # Kafka API (for use from localhost)
- - "9092:9092"
- # Kafka API (for use from minikube)
- - "9093:9093"
- volumes:
- - "redpanda-data:/var/lib/redpanda/data"
- restart: unless-stopped
-
- redpanda-init:
- image: docker.redpanda.com/vectorized/redpanda:v24.2.2
- container_name: dt-redpanda-init
- depends_on:
- redpanda:
- condition: service_healthy
- entrypoint: "/bin/bash"
- command: "/tmp/create-topics.sh"
- user: "0" # Ensure user can read create-topics.sh
- environment:
- REDPANDA_BROKERS: "dt-redpanda:29092"
- # KAFKA_TOPIC_PREFIX: ""
- # NOTIFICATION_TOPICS_PARTITIONS: "3"
- # NOTIFICATION_TOPICS_RETENTION_MS: "43200000" # 12h
- # REPO_META_ANALYSIS_TOPICS_PARTITIONS: "3"
- # REPO_META_ANALYSIS_TOPICS_RETENTION_MS: "43200000" # 12h
- # VULN_ANALYSIS_TOPICS_PARTITIONS: "3"
- # VULN_ANALYSIS_TOPICS_RETENTION_MS: "43200000" # 12h
- # VULN_MIRROR_TOPICS_PARTITIONS: "3"
- # VULN_MIRROR_TOPICS_RETENTION_MS: "43200000" # 12h
- volumes:
- - "./scripts/create-topics.sh:/tmp/create-topics.sh:ro"
- restart: on-failure
-
- redpanda-console:
- image: docker.redpanda.com/vectorized/console:v2.7.0
- container_name: dt-redpanda-console
- entrypoint: "/bin/sh"
- command: "-c 'echo \"$$CONSOLE_CONFIG_FILE\" > \"$$CONFIG_FILEPATH\"; /app/console'"
- depends_on:
- redpanda:
- condition: service_healthy
- environment:
- CONFIG_FILEPATH: "/tmp/config.yml"
- CONSOLE_CONFIG_FILE: |
- kafka:
- brokers: ["redpanda:29092"]
- protobuf:
- enabled: true
- mappings:
- - topicName: dtrack.notification.analyzer
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.bom
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.configuration
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.datasource-mirroring
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.file-system
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.integration
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.new-vulnerability
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.new-vulnerable-dependency
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.policy-violation
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.project-audit-change
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.project-created
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.project-vuln-analysis-complete
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.repository
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.notification.vex
- valueProtoType: org.dependencytrack.notification.v1.Notification
- - topicName: dtrack.repo-meta-analysis.component
- valueProtoType: org.dependencytrack.repometaanalysis.v1.AnalysisCommand
- - topicName: dtrack.repo-meta-analysis.result
- valueProtoType: org.dependencytrack.repometaanalysis.v1.AnalysisResult
- - topicName: dtrack.vuln-analysis.component
- keyProtoType: org.dependencytrack.vulnanalysis.v1.ScanKey
- valueProtoType: org.dependencytrack.vulnanalysis.v1.ScanCommand
- - topicName: dtrack.vuln-analysis.scanner.result
- keyProtoType: org.dependencytrack.vulnanalysis.v1.ScanKey
- valueProtoType: org.dependencytrack.vulnanalysis.v1.ScannerResult
- - topicName: dtrack.vuln-analysis.result
- keyProtoType: org.dependencytrack.vulnanalysis.v1.ScanKey
- valueProtoType: org.dependencytrack.vulnanalysis.v1.ScanResult
- - topicName: dtrack.vuln-analysis.result.processed
- valueProtoType: org.dependencytrack.vulnanalysis.v1.ScanResult
- - topicName: dtrack.vulnerability
- valueProtoType: org.cyclonedx.v1_6.Bom
- - topicName: dtrack.epss
- valueProtoType: org.dependencytrack.mirror.v1.EpssItem
- - topicName: dtrack.notification.user
- valueProtoType: org.dependencytrack.notification.v1.Notification
- fileSystem:
- enabled: true
- paths: ["/etc/protos"]
- refreshInterval: 5m
- ports:
- - "127.0.0.1:28080:8080"
- volumes:
- - "./proto/src/main/proto:/etc/protos:ro"
- restart: unless-stopped
-
- secret-init:
- image: alpine:latest
- command: "/bin/sh -c 'if [ ! -f /tmp/secret/secret.key ]; then head -c 32 /dev/urandom > /tmp/secret/secret.key; fi'"
- profiles:
- - demo
- volumes:
- - "secret-data:/tmp/secret"
- restart: on-failure
-
- prometheus:
- image: prom/prometheus:v2.53.1
- container_name: dt-prometheus
- ports:
- - "127.0.0.1:9090:9090"
- volumes:
- - "./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro"
- - "prometheus-data:/prometheus"
- profiles:
- - monitoring
- restart: unless-stopped
-
- grafana:
- image: grafana/grafana-oss:11.1.3
- container_name: dt-grafana
- depends_on:
- - prometheus
- environment:
- GF_SECURITY_ADMIN_USER: "admin"
- GF_SECURITY_ADMIN_PASSWORD: "admin"
- ports:
- - "127.0.0.1:3000:3000"
- volumes:
- - "grafana-data:/var/lib/grafana"
- - "./monitoring/grafana/dashboards:/etc/dashboards:ro"
- - "./monitoring/grafana/provisioning:/etc/grafana/provisioning:ro"
- profiles:
- - monitoring
- restart: unless-stopped
-
-volumes:
- apiserver-data: { }
- secret-data: { }
- postgres-data: { }
- redpanda-data: { }
- grafana-data: { }
- prometheus-data: { }
diff --git a/e2e/pom.xml b/e2e/pom.xml
index 9c38104ad..a619ac920 100644
--- a/e2e/pom.xml
+++ b/e2e/pom.xml
@@ -86,6 +86,11 @@
3.9.1
test
+
+ org.testcontainers
+ kafka
+ test
+
org.testcontainers
postgresql
diff --git a/e2e/src/test/java/org/dependencytrack/e2e/AbstractE2ET.java b/e2e/src/test/java/org/dependencytrack/e2e/AbstractE2ET.java
index 5f432ec7e..0b3163c4c 100644
--- a/e2e/src/test/java/org/dependencytrack/e2e/AbstractE2ET.java
+++ b/e2e/src/test/java/org/dependencytrack/e2e/AbstractE2ET.java
@@ -31,7 +31,6 @@
import org.junit.jupiter.api.BeforeEach;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.testcontainers.containers.BindMode;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.PostgreSQLContainer;
@@ -39,6 +38,7 @@
import org.testcontainers.containers.startupcheck.OneShotStartupCheckStrategy;
import org.testcontainers.containers.wait.strategy.Wait;
import org.testcontainers.images.PullPolicy;
+import org.testcontainers.kafka.KafkaContainer;
import org.testcontainers.utility.DockerImageName;
import org.testcontainers.utility.MountableFile;
@@ -54,23 +54,37 @@
public class AbstractE2ET {
- protected static DockerImageName POSTGRES_IMAGE = DockerImageName.parse("postgres:15-alpine");
- protected static DockerImageName REDPANDA_IMAGE = DockerImageName.parse("docker.redpanda.com/vectorized/redpanda:v24.2.2");
- protected static DockerImageName API_SERVER_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-apiserver")
- .withTag(Optional.ofNullable(System.getenv("APISERVER_VERSION")).orElse("snapshot"));
- protected static DockerImageName MIRROR_SERVICE_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-mirror-service")
- .withTag(Optional.ofNullable(System.getenv("HYADES_VERSION")).orElse("snapshot"));
- protected static DockerImageName NOTIFICATION_PUBLISHER_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-notification-publisher")
- .withTag(Optional.ofNullable(System.getenv("HYADES_VERSION")).orElse("snapshot"));
- protected static DockerImageName REPO_META_ANALYZER_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-repository-meta-analyzer")
- .withTag(Optional.ofNullable(System.getenv("HYADES_VERSION")).orElse("snapshot"));
- protected static DockerImageName VULN_ANALYZER_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-vulnerability-analyzer")
- .withTag(Optional.ofNullable(System.getenv("HYADES_VERSION")).orElse("snapshot"));
+ private enum KafkaProvider {
+ APACHE,
+ APACHE_NATIVE,
+ REDPANDA,
+ TANSU
+ }
+
+    private static final String HYADES_APISERVER_TAG = Optional.ofNullable(System.getenv("HYADES_APISERVER_VERSION")).orElse("local");
+    private static final String HYADES_TAG = Optional.ofNullable(System.getenv("HYADES_VERSION")).orElse("snapshot");
+ private static final String KAFKA_APACHE_TAG = Optional.ofNullable(System.getenv("KAFKA_APACHE_TAG")).orElse("3.8.0");
+ private static final String KAFKA_REDPANDA_TAG = Optional.ofNullable(System.getenv("KAFKA_REDPANDA_TAG")).orElse("v24.2.2");
+    private static final String KAFKA_TANSU_TAG = Optional.ofNullable(System.getenv("KAFKA_TANSU_TAG")).orElse("latest");
+ private static final String POSTGRES_TAG = Optional.ofNullable(System.getenv("POSTGRES_TAG")).orElse("15-alpine");
+
+ private static final DockerImageName KAFKA_APACHE_IMAGE = DockerImageName.parse("apache/kafka").withTag(KAFKA_APACHE_TAG);
+ private static final DockerImageName KAFKA_APACHE_NATIVE_IMAGE = DockerImageName.parse("apache/kafka-native").withTag(KAFKA_APACHE_TAG);
+ private static final DockerImageName KAFKA_REDPANDA_IMAGE = DockerImageName.parse("docker.redpanda.com/vectorized/redpanda").withTag(KAFKA_REDPANDA_TAG);
+ private static final DockerImageName KAFKA_TANSU_IMAGE = DockerImageName.parse("ghcr.io/tansu-io/tansu").withTag(KAFKA_TANSU_TAG);
+ private static final DockerImageName POSTGRES_IMAGE = DockerImageName.parse("postgres").withTag(POSTGRES_TAG);
+ private static final DockerImageName API_SERVER_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-apiserver").withTag(HYADES_APISERVER_TAG);
+ private static final DockerImageName MIRROR_SERVICE_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-mirror-service").withTag(HYADES_TAG);
+ private static final DockerImageName NOTIFICATION_PUBLISHER_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-notification-publisher").withTag(HYADES_TAG);
+ private static final DockerImageName REPO_META_ANALYZER_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-repository-meta-analyzer").withTag(HYADES_TAG);
+ private static final DockerImageName VULN_ANALYZER_IMAGE = DockerImageName.parse("ghcr.io/dependencytrack/hyades-vulnerability-analyzer").withTag(HYADES_TAG);
+
+ private static final KafkaProvider KAFKA_PROVIDER = Optional.ofNullable(System.getenv("KAFKA_PROVIDER")).map(KafkaProvider::valueOf).orElse(KafkaProvider.APACHE_NATIVE);
protected final Logger logger = LoggerFactory.getLogger(getClass());
protected final Network internalNetwork = Network.newNetwork();
protected PostgreSQLContainer<?> postgresContainer;
- protected GenericContainer<?> redpandaContainer;
+ protected GenericContainer<?> kafkaContainer;
protected GenericContainer<?> apiServerContainer;
protected GenericContainer<?> mirrorServiceContainer;
protected GenericContainer<?> notificationPublisherContainer;
@@ -82,10 +96,13 @@ public class AbstractE2ET {
@BeforeEach
void beforeEach() throws Exception {
postgresContainer = createPostgresContainer();
- redpandaContainer = createRedpandaContainer();
- deepStart(postgresContainer, redpandaContainer).join();
+ kafkaContainer = switch (KAFKA_PROVIDER) {
+ case APACHE, APACHE_NATIVE -> createApacheKafkaContainer();
+ case REDPANDA -> createRedpandaContainer();
+ case TANSU -> createTansuContainer();
+ };
+ deepStart(postgresContainer, kafkaContainer).join();
- initializeRedpanda();
generateSecretKey();
runInitializer();
@@ -115,30 +132,60 @@ private PostgreSQLContainer<?> createPostgresContainer() {
.withNetwork(internalNetwork);
}
+ @SuppressWarnings("resource")
+ protected KafkaContainer createApacheKafkaContainer() {
+ final DockerImageName imageName = switch (KAFKA_PROVIDER) {
+ case APACHE -> KAFKA_APACHE_IMAGE;
+ case APACHE_NATIVE -> KAFKA_APACHE_NATIVE_IMAGE;
+ default -> throw new IllegalArgumentException();
+ };
+ return new KafkaContainer(imageName)
+ .withNetworkAliases("kafka")
+ .withNetwork(internalNetwork);
+ }
+
@SuppressWarnings("resource")
private GenericContainer<?> createRedpandaContainer() {
- return new GenericContainer<>(REDPANDA_IMAGE)
+ return new GenericContainer<>(KAFKA_REDPANDA_IMAGE)
.withCommand(
"redpanda", "start", "--smp", "1", "--mode", "dev-container",
"--reserve-memory", "0M", "--memory", "512M", "--overprovisioned",
- "--kafka-addr", "PLAINTEXT://0.0.0.0:29092",
- "--advertise-kafka-addr", "PLAINTEXT://redpanda:29092"
+ "--kafka-addr", "PLAINTEXT://0.0.0.0:9093",
+ "--advertise-kafka-addr", "PLAINTEXT://kafka:9093"
)
.waitingFor(Wait.forLogMessage(".*Started Kafka API server.*", 1))
- .withNetworkAliases("redpanda")
+ .withNetworkAliases("kafka")
.withNetwork(internalNetwork);
}
@SuppressWarnings("resource")
- private void initializeRedpanda() {
- new GenericContainer<>(REDPANDA_IMAGE)
- .withCreateContainerCmdModifier(cmd -> cmd.withUser("0").withEntrypoint("/bin/bash"))
- .withCommand("/tmp/create-topics.sh")
- .withEnv("REDPANDA_BROKERS", "redpanda:29092")
- .withFileSystemBind("../scripts/create-topics.sh", "/tmp/create-topics.sh", BindMode.READ_ONLY)
- .waitingFor(Wait.forLogMessage(".*All topics created successfully.*", 1))
+ private GenericContainer<?> createTansuContainer() {
+ final var tansuInitContainer = new GenericContainer<>(POSTGRES_IMAGE)
+ .withCreateContainerCmdModifier(cmd -> cmd.withEntrypoint("/bin/bash"))
+ .withCommand("-c", """
+ ((psql -d tansu -q -c "select 1 from cluster limit 1" >/dev/null 2>/dev/null) && echo "Already initialized") \
+ || ((psql -c "CREATE DATABASE tansu") && (wget -q -O- https://raw.githubusercontent.com/tansu-io/tansu/refs/heads/main/work-dir/initdb.d/010-schema.sql | psql -d tansu))\
+ """)
+ .withEnv("PGHOST", "postgres")
+ .withEnv("PGUSER", "dtrack")
+ .withEnv("PGPASSWORD", "dtrack")
+ .withStartupCheckStrategy(new OneShotStartupCheckStrategy())
.withNetwork(internalNetwork)
- .start();
+ .dependsOn(postgresContainer);
+
+ return new GenericContainer<>(KAFKA_TANSU_IMAGE)
+ .withCommand(
+ "--kafka-cluster-id", "RvQwrYegSUCkIPkaiAZQlQ",
+ "--kafka-node-id", "1",
+ "--kafka-listener-url", "tcp://0.0.0.0:9093",
+ "--kafka-advertised-listener-url", "tcp://kafka:9093",
+ "--storage-engine", "pg=postgres://dtrack:dtrack@postgres/tansu",
+ "--work-dir", "/data")
+ .withExposedPorts(9093)
+ .waitingFor(Wait.forListeningPorts(9093))
+ .withNetworkAliases("kafka")
+ .withNetwork(internalNetwork)
+ .dependsOn(tansuInitContainer);
}
private void generateSecretKey() throws Exception {
@@ -157,11 +204,14 @@ private void generateSecretKey() throws Exception {
private void runInitializer() {
new GenericContainer<>(API_SERVER_IMAGE)
.withImagePullPolicy("local".equals(API_SERVER_IMAGE.getVersionPart()) ? PullPolicy.defaultPolicy() : PullPolicy.alwaysPull())
- .withEnv("EXTRA_JAVA_OPTIONS", "-Xmx256m")
+ .withEnv("JAVA_OPTIONS", "-Xmx256m -XX:+UseSerialGC -XX:TieredStopAtLevel=1")
.withEnv("ALPINE_DATABASE_URL", "jdbc:postgresql://postgres:5432/dtrack")
.withEnv("ALPINE_DATABASE_USERNAME", "dtrack")
.withEnv("ALPINE_DATABASE_PASSWORD", "dtrack")
+ .withEnv("ALPINE_DATABASE_POOL_ENABLED", "false")
+ .withEnv("KAFKA_BOOTSTRAP_SERVERS", "kafka:9093")
.withEnv("INIT_TASKS_ENABLED", "true")
+ .withEnv("INIT_TASKS_KAFKA_TOPICS_ENABLED", "true")
.withEnv("INIT_AND_EXIT", "true")
.withLogConsumer(new Slf4jLogConsumer(LoggerFactory.getLogger("initializer")))
.withStartupCheckStrategy(new OneShotStartupCheckStrategy())
@@ -177,7 +227,7 @@ private GenericContainer<?> createApiServerContainer() {
.withEnv("ALPINE_DATABASE_URL", "jdbc:postgresql://postgres:5432/dtrack?reWriteBatchedInserts=true")
.withEnv("ALPINE_DATABASE_USERNAME", "dtrack")
.withEnv("ALPINE_DATABASE_PASSWORD", "dtrack")
- .withEnv("KAFKA_BOOTSTRAP_SERVERS", "redpanda:29092")
+ .withEnv("KAFKA_BOOTSTRAP_SERVERS", "kafka:9093")
.withEnv("INIT_TASKS_ENABLED", "false")
.withEnv("ALPINE_SECRET_KEY_PATH", "/var/run/secrets/secret.key")
.withCopyFileToContainer(
@@ -201,7 +251,7 @@ private GenericContainer<?> createMirrorServiceContainer() {
final var container = new GenericContainer<>(MIRROR_SERVICE_IMAGE)
.withImagePullPolicy("local".equals(MIRROR_SERVICE_IMAGE.getVersionPart()) ? PullPolicy.defaultPolicy() : PullPolicy.alwaysPull())
.withEnv("JAVA_OPTS", "-Xmx256m")
- .withEnv("KAFKA_BOOTSTRAP_SERVERS", "redpanda:29092")
+ .withEnv("KAFKA_BOOTSTRAP_SERVERS", "kafka:9093")
.withEnv("QUARKUS_DATASOURCE_JDBC_URL", "jdbc:postgresql://postgres:5432/dtrack")
.withEnv("QUARKUS_DATASOURCE_USERNAME", "dtrack")
.withEnv("QUARKUS_DATASOURCE_PASSWORD", "dtrack")
@@ -226,7 +276,7 @@ private GenericContainer<?> createNotificationPublisherContainer() {
final var container = new GenericContainer<>(NOTIFICATION_PUBLISHER_IMAGE)
.withImagePullPolicy("local".equals(NOTIFICATION_PUBLISHER_IMAGE.getVersionPart()) ? PullPolicy.defaultPolicy() : PullPolicy.alwaysPull())
.withEnv("JAVA_OPTS", "-Xmx256m")
- .withEnv("KAFKA_BOOTSTRAP_SERVERS", "redpanda:29092")
+ .withEnv("KAFKA_BOOTSTRAP_SERVERS", "kafka:9093")
.withEnv("QUARKUS_DATASOURCE_JDBC_URL", "jdbc:postgresql://postgres:5432/dtrack")
.withEnv("QUARKUS_DATASOURCE_USERNAME", "dtrack")
.withEnv("QUARKUS_DATASOURCE_PASSWORD", "dtrack")
@@ -251,7 +301,7 @@ private GenericContainer<?> createRepoMetaAnalyzerContainer() {
final var container = new GenericContainer<>(REPO_META_ANALYZER_IMAGE)
.withImagePullPolicy("local".equals(REPO_META_ANALYZER_IMAGE.getVersionPart()) ? PullPolicy.defaultPolicy() : PullPolicy.alwaysPull())
.withEnv("JAVA_OPTS", "-Xmx256m")
- .withEnv("QUARKUS_KAFKA_STREAMS_BOOTSTRAP_SERVERS", "redpanda:29092")
+ .withEnv("QUARKUS_KAFKA_STREAMS_BOOTSTRAP_SERVERS", "kafka:9093")
.withEnv("QUARKUS_DATASOURCE_JDBC_URL", "jdbc:postgresql://postgres:5432/dtrack")
.withEnv("QUARKUS_DATASOURCE_USERNAME", "dtrack")
.withEnv("QUARKUS_DATASOURCE_PASSWORD", "dtrack")
@@ -276,7 +326,7 @@ private GenericContainer<?> createVulnAnalyzerContainer() {
final var container = new GenericContainer<>(VULN_ANALYZER_IMAGE)
.withImagePullPolicy("local".equals(VULN_ANALYZER_IMAGE.getVersionPart()) ? PullPolicy.defaultPolicy() : PullPolicy.alwaysPull())
.withEnv("JAVA_OPTS", "-Xmx256m")
- .withEnv("QUARKUS_KAFKA_STREAMS_BOOTSTRAP_SERVERS", "redpanda:29092")
+ .withEnv("QUARKUS_KAFKA_STREAMS_BOOTSTRAP_SERVERS", "kafka:9093")
.withEnv("QUARKUS_DATASOURCE_JDBC_URL", "jdbc:postgresql://postgres:5432/dtrack")
.withEnv("QUARKUS_DATASOURCE_USERNAME", "dtrack")
.withEnv("QUARKUS_DATASOURCE_PASSWORD", "dtrack")
@@ -342,7 +392,7 @@ void afterEach() {
Optional.ofNullable(notificationPublisherContainer).ifPresent(GenericContainer::stop);
Optional.ofNullable(mirrorServiceContainer).ifPresent(GenericContainer::stop);
Optional.ofNullable(apiServerContainer).ifPresent(GenericContainer::stop);
- Optional.ofNullable(redpandaContainer).ifPresent(GenericContainer::stop);
+ Optional.ofNullable(kafkaContainer).ifPresent(GenericContainer::stop);
Optional.ofNullable(postgresContainer).ifPresent(GenericContainer::stop);
Optional.ofNullable(internalNetwork).ifPresent(Network::close);
diff --git a/e2e/src/test/java/org/dependencytrack/e2e/VulnerabilityPolicyE2ET.java b/e2e/src/test/java/org/dependencytrack/e2e/VulnerabilityPolicyE2ET.java
index 1fdd8d5be..857035266 100644
--- a/e2e/src/test/java/org/dependencytrack/e2e/VulnerabilityPolicyE2ET.java
+++ b/e2e/src/test/java/org/dependencytrack/e2e/VulnerabilityPolicyE2ET.java
@@ -158,7 +158,7 @@ protected void customizeApiServerContainer(final GenericContainer<?> container)
// Configure policy bundle fetching to occur 5s after startup,
// and every minute from then on.
.withEnv("TASK_SCHEDULER_INITIAL_DELAY", "5000")
- .withEnv("TASK_CRON_VULNERABILITY_POLICY_BUNDLE_FETCH", "* * * * *");
+ .withEnv("TASK_VULNERABILITY_POLICY_FETCH_CRON", "* * * * *");
}
@Override
diff --git a/monitoring/redpanda-console.yml b/monitoring/redpanda-console.yml
new file mode 100644
index 000000000..0796d814d
--- /dev/null
+++ b/monitoring/redpanda-console.yml
@@ -0,0 +1,59 @@
+---
+kafka:
+ brokers: ["kafka:9092"]
+ protobuf:
+ enabled: true
+ mappings:
+ - topicName: dtrack.notification.analyzer
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.bom
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.configuration
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.datasource-mirroring
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.file-system
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.integration
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.new-vulnerability
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.new-vulnerable-dependency
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.policy-violation
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.project-audit-change
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.project-created
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.project-vuln-analysis-complete
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.repository
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.notification.vex
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ - topicName: dtrack.repo-meta-analysis.component
+ valueProtoType: org.dependencytrack.repometaanalysis.v1.AnalysisCommand
+ - topicName: dtrack.repo-meta-analysis.result
+ valueProtoType: org.dependencytrack.repometaanalysis.v1.AnalysisResult
+ - topicName: dtrack.vuln-analysis.component
+ keyProtoType: org.dependencytrack.vulnanalysis.v1.ScanKey
+ valueProtoType: org.dependencytrack.vulnanalysis.v1.ScanCommand
+ - topicName: dtrack.vuln-analysis.scanner.result
+ keyProtoType: org.dependencytrack.vulnanalysis.v1.ScanKey
+ valueProtoType: org.dependencytrack.vulnanalysis.v1.ScannerResult
+ - topicName: dtrack.vuln-analysis.result
+ keyProtoType: org.dependencytrack.vulnanalysis.v1.ScanKey
+ valueProtoType: org.dependencytrack.vulnanalysis.v1.ScanResult
+ - topicName: dtrack.vuln-analysis.result.processed
+ valueProtoType: org.dependencytrack.vulnanalysis.v1.ScanResult
+ - topicName: dtrack.vulnerability
+ valueProtoType: org.cyclonedx.v1_6.Bom
+ - topicName: dtrack.epss
+ valueProtoType: org.dependencytrack.mirror.v1.EpssItem
+ - topicName: dtrack.notification.user
+ valueProtoType: org.dependencytrack.notification.v1.Notification
+ fileSystem:
+ enabled: true
+ paths: ["/etc/protos"]
+ refreshInterval: 5m
\ No newline at end of file
diff --git a/scripts/create-topics.sh b/scripts/create-topics.sh
deleted file mode 100644
index 721ff6c4f..000000000
--- a/scripts/create-topics.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env bash
-
-# This file is part of Dependency-Track.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# SPDX-License-Identifier: Apache-2.0
-# Copyright (c) OWASP Foundation. All Rights Reserved.
-
-set -euxo pipefail
-
-function create_topic() {
- topic_config_flags=""
- argument_configs=($3)
- for cfg in "${argument_configs[@]}"; do
- topic_config_flags="${topic_config_flags} --topic-config ${cfg}"
- done
-
- if ! output=$(rpk topic create "$1" --partitions "$2" $topic_config_flags); then
- # Don't fail the script when the rpk command failed because the topic already exists.
- if [[ "$output" != *"TOPIC_ALREADY_EXISTS"* ]]; then
- exit 2
- fi
- fi
-}
-
-# Wait for Redpanda to become available
-rpk cluster health --watch --exit-when-healthy \
- --api-urls "$(echo "$REDPANDA_BROKERS" | sed -E 's/:[[:digit:]]+/:9644/g')"
-
-notification_topics=(
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.analyzer"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.bom"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.configuration"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.datasource-mirroring"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.file-system"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.integration"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.new-vulnerability"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.new-vulnerable-dependency"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.policy-violation"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.project-audit-change"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.project-created"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.project-vuln-analysis-complete"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.repository"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.vex"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.notification.user"
-)
-for topic_name in "${notification_topics[@]}"; do
- create_topic "$topic_name" "${NOTIFICATION_TOPICS_PARTITIONS:-1}" "retention.ms=${NOTIFICATION_TOPICS_RETENTION_MS:-43200000}"
-done
-
-repo_meta_analysis_topics=(
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.repo-meta-analysis.component"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.repo-meta-analysis.result"
-)
-for topic_name in "${repo_meta_analysis_topics[@]}"; do
- create_topic "$topic_name" "${REPO_META_ANALYSIS_TOPICS_PARTITIONS:-3}" "retention.ms=${REPO_META_ANALYSIS_TOPICS_RETENTION_MS:-43200000}"
-done
-
-vuln_analysis_topics=(
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.vuln-analysis.component"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.vuln-analysis.scanner.result"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.vuln-analysis.result"
- "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.vuln-analysis.result.processed"
-)
-for topic_name in "${vuln_analysis_topics[@]}"; do
- create_topic "$topic_name" "${VULN_ANALYSIS_TOPICS_PARTITIONS:-3}" "retention.ms=${VULN_ANALYSIS_TOPICS_RETENTION_MS:-43200000}"
-done
-
-create_topic "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.vulnerability.mirror.command" "1" "retention.ms=${VULN_MIRROR_TOPICS_RETENTION_MS:-43200000}"
-create_topic "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.vulnerability.mirror.state" "1" "cleanup.policy=compact segment.bytes=67108864 max.compaction.lag.ms=1"
-create_topic "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.vulnerability.digest" "1" "cleanup.policy=compact segment.bytes=134217728"
-create_topic "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.vulnerability" "${VULN_MIRROR_TOPICS_PARTITIONS:-3}" "cleanup.policy=compact"
-create_topic "${DT_KAFKA_TOPIC_PREFIX:-}dtrack.epss" "${VULN_MIRROR_TOPICS_PARTITIONS:-3}" "cleanup.policy=compact"
-
-echo "All topics created successfully"
\ No newline at end of file