Add maintenance task to enforce BOM upload retention
Signed-off-by: nscuro <[email protected]>
nscuro committed Sep 10, 2024
1 parent 0f88918 commit 04d9efc
Showing 12 changed files with 296 additions and 11 deletions.
1 change: 1 addition & 0 deletions src/main/java/org/dependencytrack/common/ConfigKey.java
@@ -39,6 +39,7 @@ public enum ConfigKey implements Config.Key {
TASK_SCHEDULER_INITIAL_DELAY("task.scheduler.initial.delay", "180000"),
TASK_SCHEDULER_POLLING_INTERVAL("task.scheduler.polling.interval", "60000"),
BOM_UPLOAD_STORAGE_COMPRESSION_LEVEL("bom.upload.storage.compression.level", "3"),
BOM_UPLOAD_STORAGE_RETENTION_DURATION("bom.upload.storage.retention.duration", "PT3H"),
TMP_DELAY_BOM_PROCESSED_NOTIFICATION("tmp.delay.bom.processed.notification", "false"),
INTEGRITY_INITIALIZER_ENABLED("integrity.initializer.enabled", "false"),
INTEGRITY_CHECK_ENABLED("integrity.check.enabled", "false"),
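
The new BOM_UPLOAD_STORAGE_RETENTION_DURATION key defaults to "PT3H", an ISO-8601 duration string. As a minimal illustration (not part of this commit) of how such a value maps to a java.time.Duration, mirroring the parsing performed by the maintenance task added further down:

import java.time.Duration;

class RetentionDurationExample {

    public static void main(String[] args) {
        // "PT3H" is ISO-8601 notation for a period of three hours.
        final Duration retention = Duration.parse("PT3H");
        System.out.println(retention.toHours()); // 3

        // Other values the property would accept, for illustration:
        System.out.println(Duration.parse("P1D").toHours());     // 24
        System.out.println(Duration.parse("PT90M").toMinutes()); // 90
    }
}
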
@@ -26,6 +26,7 @@
import jakarta.servlet.ServletContextEvent;
import jakarta.servlet.ServletContextListener;
import org.dependencytrack.common.ConfigKey;
import org.dependencytrack.event.maintenance.BomUploadStorageMaintenanceEvent;
import org.dependencytrack.event.maintenance.ComponentMetadataMaintenanceEvent;
import org.dependencytrack.event.maintenance.MetricsMaintenanceEvent;
import org.dependencytrack.event.maintenance.TagMaintenanceEvent;
@@ -50,6 +51,7 @@
import org.dependencytrack.tasks.TaskScheduler;
import org.dependencytrack.tasks.VexUploadProcessingTask;
import org.dependencytrack.tasks.VulnerabilityAnalysisTask;
import org.dependencytrack.tasks.maintenance.BomUploadStorageMaintenanceTask;
import org.dependencytrack.tasks.maintenance.ComponentMetadataMaintenanceTask;
import org.dependencytrack.tasks.maintenance.MetricsMaintenanceTask;
import org.dependencytrack.tasks.maintenance.TagMaintenanceTask;
@@ -116,6 +118,7 @@ public void contextInitialized(final ServletContextEvent event) {

// Execute maintenance tasks on the single-threaded event service.
// This way, they are not blocked by, and don't block, actual processing tasks on the main event service.
EVENT_SERVICE_ST.subscribe(BomUploadStorageMaintenanceEvent.class, BomUploadStorageMaintenanceTask.class);
EVENT_SERVICE_ST.subscribe(ComponentMetadataMaintenanceEvent.class, ComponentMetadataMaintenanceTask.class);
EVENT_SERVICE_ST.subscribe(MetricsMaintenanceEvent.class, MetricsMaintenanceTask.class);
EVENT_SERVICE_ST.subscribe(TagMaintenanceEvent.class, TagMaintenanceTask.class);
@@ -157,6 +160,7 @@ public void contextDestroyed(final ServletContextEvent event) {
EVENT_SERVICE.unsubscribe(VulnerabilityPolicyFetchTask.class);
EVENT_SERVICE.shutdown(DRAIN_TIMEOUT_DURATION);

EVENT_SERVICE_ST.unsubscribe(BomUploadStorageMaintenanceTask.class);
EVENT_SERVICE_ST.unsubscribe(ComponentMetadataMaintenanceTask.class);
EVENT_SERVICE_ST.unsubscribe(MetricsMaintenanceTask.class);
EVENT_SERVICE_ST.unsubscribe(TagMaintenanceTask.class);
@@ -0,0 +1,27 @@
/*
* This file is part of Dependency-Track.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
* Copyright (c) OWASP Foundation. All Rights Reserved.
*/
package org.dependencytrack.event.maintenance;

import alpine.event.framework.Event;

/**
* @since 5.6.0
*/
public class BomUploadStorageMaintenanceEvent implements Event {
}
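
Outside of the cron schedule, the marker event can also be published ad hoc. A hedged sketch, assuming Alpine's SingleThreadedEventService singleton, which is the event service the task gets subscribed on in the listener change above:

import alpine.event.framework.SingleThreadedEventService;
import org.dependencytrack.event.maintenance.BomUploadStorageMaintenanceEvent;

class TriggerMaintenanceSketch {

    static void triggerBomUploadStorageMaintenance() {
        // Publishes onto the single-threaded event service, where
        // BomUploadStorageMaintenanceTask is subscribed.
        SingleThreadedEventService.getInstance().publish(new BomUploadStorageMaintenanceEvent());
    }
}
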
@@ -490,7 +490,7 @@ private Response process(QueryManager qm, Project project, String encodedBomData
final var decodedInputStream = Base64.getDecoder().wrap(encodedInputStream);
final var byteOrderMarkInputStream = new BOMInputStream(decodedInputStream)) {
final byte[] bomBytes = IOUtils.toByteArray(byteOrderMarkInputStream);
validateAndStoreBom(bomUploadEvent.getChainIdentifier(), bomBytes);
validateAndStoreBom(bomUploadEvent.getChainIdentifier(), bomBytes, project);
} catch (IOException e) {
LOGGER.error("An unexpected error occurred while validating or storing a BOM uploaded to project: " + project.getUuid(), e);
return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
@@ -522,7 +522,7 @@ private Response process(QueryManager qm, Project project, List<FormDataBodyPart
try (final var inputStream = bodyPartEntity.getInputStream();
final var byteOrderMarkInputStream = new BOMInputStream(inputStream)) {
final byte[] bomBytes = IOUtils.toByteArray(byteOrderMarkInputStream);
validateAndStoreBom(bomUploadEvent.getChainIdentifier(), bomBytes);
validateAndStoreBom(bomUploadEvent.getChainIdentifier(), bomBytes, project);
} catch (IOException e) {
LOGGER.error("An unexpected error occurred while validating or storing a BOM uploaded to project: " + project.getUuid(), e);
return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
@@ -37,7 +37,11 @@ public class LocalBomUploadStorageFactory implements ExtensionFactory<BomUploadS

private static final Logger LOGGER = Logger.getLogger(LocalBomUploadStorageFactory.class);

private static final ConfigDefinition CONFIG_DIRECTORY = new ConfigDefinition("directory", ConfigSource.DEPLOYMENT, false, false);
private static final ConfigDefinition CONFIG_DIRECTORY = new ConfigDefinition(
"directory",
ConfigSource.DEPLOYMENT,
/* isRequired */ false,
/* isSecret */ false);

private Path directoryPath;

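
The retention logic of the local storage extension itself is not visible in this excerpt. Purely as a hypothetical sketch of the technique the new maintenance task relies on, a directory-backed deleteBomsForRetentionDuration could compare file modification times against a cutoff; this is not the actual LocalBomUploadStorage code:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.time.Instant;
import java.util.stream.Stream;

// Hypothetical sketch of retention enforcement for a directory-backed storage.
final class FilesystemRetentionSketch {

    static int deleteBomsOlderThan(final Path directoryPath, final Duration retentionDuration) throws IOException {
        final Instant cutoff = Instant.now().minus(retentionDuration);
        int deleted = 0;
        try (final Stream<Path> files = Files.list(directoryPath)) {
            for (final Path file : files.toList()) {
                // Delete files whose last modification predates the cutoff.
                if (Files.getLastModifiedTime(file).toInstant().isBefore(cutoff)) {
                    Files.deleteIfExists(file);
                    deleted++;
                }
            }
        }
        return deleted;
    }
}
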
@@ -18,6 +18,7 @@
*/
package org.dependencytrack.storage;

import alpine.common.logging.Logger;
import io.minio.GetObjectArgs;
import io.minio.GetObjectResponse;
import io.minio.MinioClient;
@@ -37,6 +38,8 @@
*/
class S3BomUploadStorage implements BomUploadStorage {

private static final Logger LOGGER = Logger.getLogger(S3BomUploadStorage.class);

static final String EXTENSION_NAME = "s3";

private final MinioClient s3Client;
@@ -99,7 +102,8 @@ public boolean deleteBomByToken(final UUID token) throws IOException {
}

@Override
public int deleteBomsForRetentionDuration(final Duration duration) throws IOException {
public int deleteBomsForRetentionDuration(final Duration duration) {
LOGGER.info("Not deleting any BOMs; Retention is managed via bucket retention policy");
return 0;
}

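
Because the s3 extension deliberately turns deleteBomsForRetentionDuration into a no-op, retention must be configured on the bucket itself. A hedged sketch using the MinIO Java client this extension already builds on; the constructor arguments follow the MinIO documentation examples and may need adjusting for the client version in use. Note that S3 lifecycle expiration is day-granular, so an hour-level value such as PT3H cannot be expressed here. Bucket name and rule id are placeholders:

import io.minio.MinioClient;
import io.minio.SetBucketLifecycleArgs;
import io.minio.messages.Expiration;
import io.minio.messages.LifecycleConfiguration;
import io.minio.messages.LifecycleRule;
import io.minio.messages.RuleFilter;
import io.minio.messages.Status;

import java.time.ZonedDateTime;
import java.util.List;

// Sketch only: applies a one-day expiration rule to the BOM upload bucket.
final class S3RetentionSketch {

    static void applyRetention(final MinioClient s3Client) throws Exception {
        final LifecycleRule rule = new LifecycleRule(
                Status.ENABLED,
                null,                                          // abortIncompleteMultipartUpload
                new Expiration((ZonedDateTime) null, 1, null), // expire objects after one day
                new RuleFilter(""),                            // apply to all objects in the bucket
                "expire-bom-uploads",                          // placeholder rule id
                null, null, null);

        s3Client.setBucketLifecycle(SetBucketLifecycleArgs.builder()
                .bucket("dtrack-bom-uploads")                  // placeholder bucket name
                .config(new LifecycleConfiguration(List.of(rule)))
                .build());
    }
}
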
@@ -35,11 +35,31 @@ public class S3BomUploadStorageFactory implements ExtensionFactory<BomUploadStor

private static final Logger LOGGER = Logger.getLogger(S3BomUploadStorageFactory.class);

private static final ConfigDefinition CONFIG_ENDPOINT = new ConfigDefinition("endpoint", ConfigSource.DEPLOYMENT, true, false);
private static final ConfigDefinition CONFIG_BUCKET = new ConfigDefinition("bucket", ConfigSource.DEPLOYMENT, true, false);
private static final ConfigDefinition CONFIG_ACCESS_KEY = new ConfigDefinition("access.key", ConfigSource.DEPLOYMENT, false, true);
private static final ConfigDefinition CONFIG_SECRET_KEY = new ConfigDefinition("secret.key", ConfigSource.DEPLOYMENT, false, true);
private static final ConfigDefinition CONFIG_REGION = new ConfigDefinition("region", ConfigSource.DEPLOYMENT, false, false);
private static final ConfigDefinition CONFIG_ENDPOINT = new ConfigDefinition(
"endpoint",
ConfigSource.DEPLOYMENT,
/* isRequired */ true,
/* isSecret */ false);
private static final ConfigDefinition CONFIG_BUCKET = new ConfigDefinition(
"bucket",
ConfigSource.DEPLOYMENT,
/* isRequired */ true,
/* isSecret */ false);
private static final ConfigDefinition CONFIG_ACCESS_KEY = new ConfigDefinition(
"access.key",
ConfigSource.DEPLOYMENT,
/* isRequired */ false,
/* isSecret */ true);
private static final ConfigDefinition CONFIG_SECRET_KEY = new ConfigDefinition(
"secret.key",
ConfigSource.DEPLOYMENT,
/* isRequired */ false,
/* isSecret */ true);
private static final ConfigDefinition CONFIG_REGION = new ConfigDefinition(
"region",
ConfigSource.DEPLOYMENT,
/* isRequired */ false,
/* isSecret */ false);

private MinioClient s3Client;
private String bucketName;
3 changes: 3 additions & 0 deletions src/main/java/org/dependencytrack/tasks/TaskScheduler.java
@@ -36,6 +36,7 @@
import org.dependencytrack.event.PortfolioVulnerabilityAnalysisEvent;
import org.dependencytrack.event.VulnerabilityMetricsUpdateEvent;
import org.dependencytrack.event.VulnerabilityPolicyFetchEvent;
import org.dependencytrack.event.maintenance.BomUploadStorageMaintenanceEvent;
import org.dependencytrack.event.maintenance.ComponentMetadataMaintenanceEvent;
import org.dependencytrack.event.maintenance.MetricsMaintenanceEvent;
import org.dependencytrack.event.maintenance.TagMaintenanceEvent;
@@ -44,6 +45,7 @@
import org.dependencytrack.event.maintenance.WorkflowMaintenanceEvent;
import org.dependencytrack.model.ConfigPropertyConstants;
import org.dependencytrack.persistence.QueryManager;
import org.dependencytrack.tasks.maintenance.BomUploadStorageMaintenanceTask;
import org.dependencytrack.tasks.maintenance.ComponentMetadataMaintenanceTask;
import org.dependencytrack.tasks.maintenance.MetricsMaintenanceTask;
import org.dependencytrack.tasks.maintenance.TagMaintenanceTask;
@@ -90,6 +92,7 @@ private TaskScheduler() {
Map.entry(new PortfolioVulnerabilityAnalysisEvent(), getCronScheduleForTask(VulnerabilityAnalysisTask.class)),
Map.entry(new PortfolioRepositoryMetaAnalysisEvent(), getCronScheduleForTask(RepositoryMetaAnalysisTask.class)),
Map.entry(new IntegrityMetaInitializerEvent(), getCronScheduleForTask(IntegrityMetaInitializerTask.class)),
Map.entry(new BomUploadStorageMaintenanceEvent(), getCronScheduleForTask(BomUploadStorageMaintenanceTask.class)),
Map.entry(new ComponentMetadataMaintenanceEvent(), getCronScheduleForTask(ComponentMetadataMaintenanceTask.class)),
Map.entry(new MetricsMaintenanceEvent(), getCronScheduleForTask(MetricsMaintenanceTask.class)),
Map.entry(new TagMaintenanceEvent(), getCronScheduleForTask(TagMaintenanceTask.class)),
@@ -0,0 +1,100 @@
/*
* This file is part of Dependency-Track.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
* Copyright (c) OWASP Foundation. All Rights Reserved.
*/
package org.dependencytrack.tasks.maintenance;

import alpine.Config;
import alpine.common.logging.Logger;
import alpine.event.framework.Event;
import alpine.event.framework.Subscriber;
import org.dependencytrack.event.maintenance.BomUploadStorageMaintenanceEvent;
import org.dependencytrack.plugin.PluginManager;
import org.dependencytrack.storage.BomUploadStorage;

import java.io.IOException;
import java.time.Duration;

import static net.javacrumbs.shedlock.core.LockAssert.assertLocked;
import static org.dependencytrack.common.ConfigKey.BOM_UPLOAD_STORAGE_RETENTION_DURATION;
import static org.dependencytrack.util.LockProvider.executeWithLock;
import static org.dependencytrack.util.TaskUtil.getLockConfigForTask;

/**
* @since 5.6.0
*/
public class BomUploadStorageMaintenanceTask implements Subscriber {

private static final Logger LOGGER = Logger.getLogger(BomUploadStorageMaintenanceTask.class);

private final Config config;
private final PluginManager pluginManager;

@SuppressWarnings("unused") // Called by Alpine's event system
public BomUploadStorageMaintenanceTask() {
this(Config.getInstance(), PluginManager.getInstance());
}

BomUploadStorageMaintenanceTask(final Config config, final PluginManager pluginManager) {
this.config = config;
this.pluginManager = pluginManager;
}

@Override
public void inform(final Event event) {
if (!(event instanceof BomUploadStorageMaintenanceEvent)) {
return;
}

final long startTimeNs = System.nanoTime();
try {
LOGGER.info("Starting BOM upload storage maintenance");
final Statistics statistics = executeWithLock(
getLockConfigForTask(BomUploadStorageMaintenanceTask.class),
this::informLocked);
if (statistics == null) {
LOGGER.info("Task is locked by another instance; Skipping");
return;
}

final var taskDuration = Duration.ofNanos(System.nanoTime() - startTimeNs);
LOGGER.info("Completed in %s: %s".formatted(taskDuration, statistics));
} catch (Throwable e) {
final var taskDuration = Duration.ofNanos(System.nanoTime() - startTimeNs);
LOGGER.error("Failed to complete after %s".formatted(taskDuration), e);
}
}

private record Statistics(
Duration retentionDuration,
int deletedBoms) {
}

private Statistics informLocked() throws IOException {
assertLocked();

final Duration retentionDuration = Duration.parse(config.getProperty(BOM_UPLOAD_STORAGE_RETENTION_DURATION));

final int numDeleted;
try (final var storage = pluginManager.getExtension(BomUploadStorage.class)) {
numDeleted = storage.deleteBomsForRetentionDuration(retentionDuration);
}

return new Statistics(retentionDuration, numDeleted);
}

}
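
The package-private constructor keeps the task testable with plain mocks. A hedged sketch using JUnit 5 and Mockito; it assumes a test harness that has already initialized the database-backed lock provider used by executeWithLock, since without it the lock acquisition would fail:

package org.dependencytrack.tasks.maintenance;

import alpine.Config;
import org.dependencytrack.event.maintenance.BomUploadStorageMaintenanceEvent;
import org.dependencytrack.plugin.PluginManager;
import org.dependencytrack.storage.BomUploadStorage;
import org.junit.jupiter.api.Test;

import java.time.Duration;

import static org.dependencytrack.common.ConfigKey.BOM_UPLOAD_STORAGE_RETENTION_DURATION;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

// Hedged test sketch, not part of this commit.
class BomUploadStorageMaintenanceTaskSketchTest {

    @Test
    void informShouldDeleteExpiredBoms() throws Exception {
        final Config configMock = mock(Config.class);
        when(configMock.getProperty(BOM_UPLOAD_STORAGE_RETENTION_DURATION)).thenReturn("PT3H");

        final BomUploadStorage storageMock = mock(BomUploadStorage.class);
        when(storageMock.deleteBomsForRetentionDuration(eq(Duration.ofHours(3)))).thenReturn(2);

        final PluginManager pluginManagerMock = mock(PluginManager.class);
        when(pluginManagerMock.getExtension(BomUploadStorage.class)).thenReturn(storageMock);

        final var task = new BomUploadStorageMaintenanceTask(configMock, pluginManagerMock);
        task.inform(new BomUploadStorageMaintenanceEvent());

        verify(storageMock).deleteBomsForRetentionDuration(eq(Duration.ofHours(3)));
    }
}
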
@@ -7,13 +7,12 @@ option java_package = "org.dependencytrack.proto.event.v1alpha1";

// Notifies that a BOM was uploaded.
message BomUploadedEvent {
string token = 1; // Foo
string token = 1; // Event token for which the BOM was uploaded.
Project project = 2; // Project the BOM was uploaded to.

message Project {
string uuid = 1; // UUID of the project.
string name = 2; // Name of the project.
string version = 3; // Version of the project.
}

}
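
With the placeholder comment on token replaced, the field documentation now matches its use. For reference, a sketch of constructing the message from the generated Java classes, assuming standard protobuf-java code generation with java_multiple_files; field values are placeholders:

import org.dependencytrack.proto.event.v1alpha1.BomUploadedEvent;

// Sketch of building the event message; values below are placeholders.
class BomUploadedEventSketch {

    static BomUploadedEvent buildExample() {
        return BomUploadedEvent.newBuilder()
                .setToken("c0ffee00-0000-0000-0000-000000000000") // event token of the upload
                .setProject(BomUploadedEvent.Project.newBuilder()
                        .setUuid("3aa0d2b7-0000-0000-0000-000000000000")
                        .setName("acme-app")
                        .setVersion("1.2.3"))
                .build();
    }
}
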
43 changes: 43 additions & 0 deletions src/main/resources/application.properties
@@ -1402,6 +1402,31 @@ dev.services.image.kafka=docker.redpanda.com/vectorized/redpanda:v24.2.4
# @type: string
dev.services.image.postgres=postgres:16

# Cron expression of the BOM upload storage maintenance task.
#
# @category: Task Scheduling
# @type: cron
# @required
task.bom.upload.storage.maintenance.cron=30 * * * *

# Maximum duration in ISO 8601 format for which the BOM upload storage maintenance task will hold a lock.
# <br/><br/>
# The duration should be long enough to cover the task's execution duration.
#
# @category: Task Scheduling
# @type: duration
# @required
task.bom.upload.storage.maintenance.lock.max.duration=PT15M

# Minimum duration in ISO 8601 format for which the BOM upload storage maintenance task will hold a lock.
# <br/><br/>
# The duration should be long enough to cover potential clock skew across API server instances.
#
# @category: Task Scheduling
# @type: duration
# @required
task.bom.upload.storage.maintenance.lock.min.duration=PT1M

# Cron expression of the component metadata maintenance task.
# <br/><br/>
# The task deletes orphaned records from the `INTEGRITY_META_COMPONENT` and
@@ -1589,6 +1614,24 @@ task.workflow.maintenance.lock.min.duration=PT1M
# @required
bom.upload.storage.compression.level=3

# Defines for how long uploaded BOMs will be retained.
# <br/><br/>
# Uploaded BOMs will be deleted from storage after successful processing.
# A BOM upload remains in storage only if processing it, or deleting it after processing, failed.
# Retention is thus only relevant for BOMs that either:
# <ul>
# <li>failed to be processed, or</li>
# <li>failed to be deleted after processing</li>
# </ul>
# <br/><br/>
# Note that not all storage extensions support this option.
# For example `s3` handles retention via bucket retention policies.
#
# @category: Storage
# @type: duration
# @required
bom.upload.storage.retention.duration=PT3H

# Defines the BOM upload storage extension to use.
# When null, an enabled extension will be chosen based on its priority.
# It is recommended to explicitly configure an extension for predictable behavior.