
Commit

Make BOM upload storage compression level configurable
Signed-off-by: nscuro <[email protected]>
nscuro committed Sep 10, 2024
1 parent 8315851 commit 0f88918
Showing 3 changed files with 49 additions and 1 deletion.
1 change: 1 addition & 0 deletions src/main/java/org/dependencytrack/common/ConfigKey.java
@@ -38,6 +38,7 @@ public enum ConfigKey implements Config.Key {

TASK_SCHEDULER_INITIAL_DELAY("task.scheduler.initial.delay", "180000"),
TASK_SCHEDULER_POLLING_INTERVAL("task.scheduler.polling.interval", "60000"),
BOM_UPLOAD_STORAGE_COMPRESSION_LEVEL("bom.upload.storage.compression.level", "3"),
TMP_DELAY_BOM_PROCESSED_NOTIFICATION("tmp.delay.bom.processed.notification", "false"),
INTEGRITY_INITIALIZER_ENABLED("integrity.initializer.enabled", "false"),
INTEGRITY_CHECK_ENABLED("integrity.check.enabled", "false"),
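
The second argument to each enum constant is the property's default value, so deployments that never set `bom.upload.storage.compression.level` keep the previously hardcoded level of 3. A minimal sketch of how such a key is resolved through Alpine's Config (the same call the resource change below makes):

import alpine.Config;

import static org.dependencytrack.common.ConfigKey.BOM_UPLOAD_STORAGE_COMPRESSION_LEVEL;

class CompressionLevelConfigExample {

    static int resolveCompressionLevel() {
        // Reads bom.upload.storage.compression.level from application.properties;
        // falls back to the default of "3" declared on the enum constant when unset.
        return Config.getInstance().getPropertyAsInt(BOM_UPLOAD_STORAGE_COMPRESSION_LEVEL);
    }
}
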
5 changes: 4 additions & 1 deletion
@@ -18,6 +18,7 @@
*/
package org.dependencytrack.resources.v1;

import alpine.Config;
import alpine.common.logging.Logger;
import alpine.notification.Notification;
import alpine.notification.NotificationLevel;
@@ -88,6 +89,7 @@
import java.util.concurrent.atomic.AtomicBoolean;

import static java.util.function.Predicate.not;
import static org.dependencytrack.common.ConfigKey.BOM_UPLOAD_STORAGE_COMPRESSION_LEVEL;
import static org.dependencytrack.model.ConfigPropertyConstants.BOM_VALIDATION_ENABLED;

/**
@@ -542,8 +544,9 @@ private Response process(QueryManager qm, Project project, List<FormDataBodyPart
private void validateAndStoreBom(final UUID token, final byte[] bomBytes, final Project project) throws IOException {
validate(bomBytes, project);

final int compressionLevel = Config.getInstance().getPropertyAsInt(BOM_UPLOAD_STORAGE_COMPRESSION_LEVEL);
try (final BomUploadStorage storageProvider = PluginManager.getInstance().getExtension(BomUploadStorage.class)) {
storageProvider.storeBomCompressed(token, bomBytes, /* TODO: Make configurable */ 3);
storageProvider.storeBomCompressed(token, bomBytes, compressionLevel);
}
}

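The storeBomCompressed(token, bomBytes, compressionLevel) call hands the configured level to whichever BomUploadStorage extension the PluginManager resolves. As a rough illustration of what an extension might do with that third argument, here is a toy in-memory variant that compresses with the zstd-jni binding; it is an assumption for illustration only, not code from this commit or from any shipped extension:

import com.github.luben.zstd.Zstd;

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative only: applies the configured zstd level when persisting a BOM.
class InMemoryBomUploadStorage {

    private final Map<UUID, byte[]> bomsByToken = new ConcurrentHashMap<>();

    void storeBomCompressed(final UUID token, final byte[] bomBytes, final int compressionLevel) {
        // Higher levels shrink the stored payload but cost more CPU per upload.
        bomsByToken.put(token, Zstd.compress(bomBytes, compressionLevel));
    }

    byte[] getBomCompressed(final UUID token) {
        return bomsByToken.get(token);
    }
}
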
44 changes: 44 additions & 0 deletions src/main/resources/application.properties
@@ -695,6 +695,11 @@ dt.kafka.topic.prefix=
# Refer to https://kafka.apache.org/documentation/#consumerconfigs for available options.
# alpine.kafka.processor.<name>.consumer.<consumer.config.name>=

# Defines the maximum number of threads to process records from the `dtrack.event.bom-uploaded` topic.
# <br/><br/>
# A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
# Parallelization is based on record keys. Concurrency can thus be higher than the number of partitions.
#
# @category: Kafka
# @type: integer
# @required
@@ -737,6 +742,11 @@ alpine.kafka.processor.bom.upload.consumer.group.id=dtrack-apiserver-processor
# @required
alpine.kafka.processor.bom.upload.consumer.auto.offset.reset=earliest

# Defines the maximum number of threads to process records from the `dtrack.vulnerability` topic.
# <br/><br/>
# A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
# Parallelization is based on partitions. Concurrency can thus not exceed the number of partitions.
#
# @category: Kafka
# @type: integer
# @required
@@ -784,6 +794,11 @@ alpine.kafka.processor.vuln.mirror.consumer.auto.offset.reset=earliest
# @required
alpine.kafka.processor.epss.mirror.max.concurrency=-1

# Defines the maximum number of threads to process records from the `dtrack.epss` topic.
# <br/><br/>
# A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
# Parallelization is based on record keys. Concurrency can thus be higher than the number of partitions.
#
# @category: Kafka
# @type: enum
# @valid-values: [key, partition, unordered]
@@ -831,6 +846,11 @@ alpine.kafka.processor.epss.mirror.max.batch.size=500
# @required
alpine.kafka.processor.repo.meta.analysis.result.max.concurrency=-1

# Defines the maximum number of threads to process records from the `dtrack.repo-meta-analysis.result` topic.
# <br/><br/>
# A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
# Parallelization is based on record keys. Concurrency can thus be higher than the number of partitions.
#
# @category: Kafka
# @type: enum
# @valid-values: [key, partition, unordered]
@@ -873,6 +893,11 @@ alpine.kafka.processor.repo.meta.analysis.result.consumer.auto.offset.reset=earl
# @required
alpine.kafka.processor.vuln.scan.result.max.concurrency=-1

# Defines the maximum number of threads to process records from the `dtrack.vuln-analysis.result` topic.
# <br/><br/>
# A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
# Parallelization is based on record keys. Concurrency can thus be higher than the number of partitions.
#
# @category: Kafka
# @type: enum
# @valid-values: [key, partition, unordered]
@@ -915,6 +940,12 @@ alpine.kafka.processor.vuln.scan.result.consumer.auto.offset.reset=earliest
# @required
alpine.kafka.processor.vuln.scan.result.processed.max.batch.size=1000

# Defines the maximum number of threads to process records from the `dtrack.vuln-analysis.result.processed` topic.
# <br/><br/>
# A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
# <br/><br/>
# Because the processing is optimized for batching, increasing this value past `1` is unlikely to yield better results.
#
# @category: Kafka
# @type: integer
# @required
@@ -1545,6 +1576,19 @@ task.workflow.maintenance.lock.max.duration=PT5M
# @required
task.workflow.maintenance.lock.min.duration=PT1M

# Defines the zstd compression level to use for BOM upload storage.
# <br/><br/>
# A high compression level can further reduce storage requirements,
# but will also require significantly more CPU to perform compression.
# If compression takes too long, BOM upload requests may time out,
# depending on client, server, and load balancer / reverse proxy configuration.
# Note that impact on CPU will be amplified if many BOMs are uploaded concurrently.
#
# @category: Storage
# @valid-values: [0..22]
# @required
bom.upload.storage.compression.level=3
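
To get a feel for the CPU/size trade-off described above, the standalone snippet below compresses a repetitive payload at a few levels and prints the resulting size and timing. It assumes the zstd-jni library (com.github.luben:zstd-jni) is on the classpath and is not part of this commit:

import com.github.luben.zstd.Zstd;

import java.nio.charset.StandardCharsets;

public class CompressionLevelDemo {

    public static void main(final String[] args) {
        // Repetitive stand-in for a large JSON/XML BOM; real BOMs compress differently.
        final byte[] bom = ("{\"bomFormat\":\"CycloneDX\"}" + "a".repeat(5_000_000))
                .getBytes(StandardCharsets.UTF_8);

        for (final int level : new int[]{1, 3, 9, 19}) {
            final long start = System.nanoTime();
            final byte[] compressed = Zstd.compress(bom, level);
            final long millis = (System.nanoTime() - start) / 1_000_000;
            System.out.printf("level=%d compressed=%d bytes took=%d ms%n",
                    level, compressed.length, millis);
        }
    }
}

Results vary heavily with BOM content, but higher levels consistently trade CPU time for storage savings, which is why the default stays at zstd's own default level of 3.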

# Defines the BOM upload storage extension to use.
# When null, an enabled extension will be chosen based on its priority.
# It is recommended to explicitly configure an extension for predictable behavior.
