From caef8f584e51795f2e7341d555036bcdfbb06eca Mon Sep 17 00:00:00 2001
From: nscuro
Date: Fri, 20 Sep 2024 13:14:14 +0200
Subject: [PATCH] Fix merge conflict artifacts

Signed-off-by: nscuro
---
 .../persistence/jdbi/BomDao.java              |   2 +-
 .../resources/v1/BomResource.java             | 102 +++++++++-----
 .../resources/v1/VexResource.java             |   4 +-
 .../storage/LocalBomUploadStorageFactory.java |  19 ++--
 src/main/resources/application.properties     |  58 +++++-----
 .../processor/BomUploadProcessorTest.java     |  22 ++--
 .../v1/ConfigPropertyResourceTest.java        |   1 +
 7 files changed, 105 insertions(+), 103 deletions(-)

diff --git a/src/main/java/org/dependencytrack/persistence/jdbi/BomDao.java b/src/main/java/org/dependencytrack/persistence/jdbi/BomDao.java
index 94f3781e5..53f7387e9 100644
--- a/src/main/java/org/dependencytrack/persistence/jdbi/BomDao.java
+++ b/src/main/java/org/dependencytrack/persistence/jdbi/BomDao.java
@@ -59,7 +59,7 @@ ON CONFLICT ("TOKEN")
 
     @SqlUpdate("""
             DELETE FROM "BOM_UPLOAD"
-             WHERE "UPLOADED_AT" < (NOW() - :duration)
+             WHERE NOW() - "UPLOADED_AT" > :duration
            """)
     int deleteAllUploadsForRetentionDuration(@Bind Duration duration);
 
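Both forms of this predicate select uploads older than the bound retention duration; the rewrite just compares the row's age directly. A minimal sketch of the same rule in plain Java, using java.time (class and method names below are illustrative, not part of the patch):

```java
import java.time.Duration;
import java.time.Instant;

class RetentionCheck {

    // Mirrors WHERE NOW() - "UPLOADED_AT" > :duration - a row is eligible
    // for deletion once its age exceeds the configured retention duration.
    static boolean isExpired(Instant uploadedAt, Duration retention) {
        return Duration.between(uploadedAt, Instant.now()).compareTo(retention) > 0;
    }

    public static void main(String[] args) {
        // bom.upload.storage.retention.duration defaults to PT3H (see the
        // application.properties hunks further down).
        Duration retention = Duration.parse("PT3H");
        Instant uploadedFourHoursAgo = Instant.now().minus(Duration.ofHours(4));
        System.out.println(isExpired(uploadedFourHoursAgo, retention)); // true
    }
}
```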
diff --git a/src/main/java/org/dependencytrack/resources/v1/BomResource.java b/src/main/java/org/dependencytrack/resources/v1/BomResource.java
index 72a0167d7..12eedd9b8 100644
--- a/src/main/java/org/dependencytrack/resources/v1/BomResource.java
+++ b/src/main/java/org/dependencytrack/resources/v1/BomResource.java
@@ -20,6 +20,7 @@
 
 import alpine.Config;
 import alpine.common.logging.Logger;
+import alpine.model.ConfigProperty;
 import alpine.notification.Notification;
 import alpine.notification.NotificationLevel;
 import alpine.server.auth.PermissionRequired;
@@ -90,6 +91,7 @@
 import java.util.Base64;
 import java.util.List;
 import java.util.Set;
+import java.util.UUID;
 
 import static java.util.function.Predicate.not;
 import static org.dependencytrack.common.ConfigKey.BOM_UPLOAD_STORAGE_COMPRESSION_LEVEL;
@@ -502,7 +504,7 @@ private Response process(QueryManager qm, Project project, List<FormDataBodyPart>
-        final Set<String> validationModeTags;
-        try {
-            final JsonReader jsonParser = Json.createReader(new StringReader(tagsProperty.getPropertyValue()));
-            final JsonArray jsonArray = jsonParser.readArray();
-            validationModeTags = Set.copyOf(jsonArray.getValuesAs(JsonString::getString));
-        } catch (RuntimeException e) {
-            LOGGER.warn("Tags of property %s:%s could not be parsed as JSON array"
-                    .formatted(tagsPropertyConstant.getGroupName(), tagsPropertyConstant.getPropertyName()), e);
-            return validationMode == BomValidationMode.DISABLED_FOR_TAGS;
-        }
+        final ConfigPropertyConstants tagsPropertyConstant = validationMode == BomValidationMode.ENABLED_FOR_TAGS
+                ? BOM_VALIDATION_TAGS_INCLUSIVE
+                : BOM_VALIDATION_TAGS_EXCLUSIVE;
+        final ConfigProperty tagsProperty = qm.getConfigProperty(
+                tagsPropertyConstant.getGroupName(),
+                tagsPropertyConstant.getPropertyName()
+        );
 
-        final boolean doTagsMatch = project.getTags().stream()
-                .map(org.dependencytrack.model.Tag::getName)
-                .anyMatch(validationModeTags::contains);
-        return (validationMode == BomValidationMode.ENABLED_FOR_TAGS && doTagsMatch)
-                || (validationMode == BomValidationMode.DISABLED_FOR_TAGS && !doTagsMatch);
+        final Set<String> validationModeTags;
+        try {
+            final JsonReader jsonParser = Json.createReader(new StringReader(tagsProperty.getPropertyValue()));
+            final JsonArray jsonArray = jsonParser.readArray();
+            validationModeTags = Set.copyOf(jsonArray.getValuesAs(JsonString::getString));
+        } catch (RuntimeException e) {
+            LOGGER.warn("Tags of property %s:%s could not be parsed as JSON array"
+                    .formatted(tagsPropertyConstant.getGroupName(), tagsPropertyConstant.getPropertyName()), e);
+            return validationMode == BomValidationMode.DISABLED_FOR_TAGS;
+        }
+
+        final boolean doTagsMatch = project.getTags().stream()
+                .map(org.dependencytrack.model.Tag::getName)
+                .anyMatch(validationModeTags::contains);
+        return (validationMode == BomValidationMode.ENABLED_FOR_TAGS && doTagsMatch)
+                || (validationMode == BomValidationMode.DISABLED_FOR_TAGS && !doTagsMatch);
     }
 }
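The re-indented block decides whether BOM validation should run for a project based on its tags: with ENABLED_FOR_TAGS, validation runs only when a project tag is in the configured set; with DISABLED_FOR_TAGS, only when none is. A condensed, self-contained restatement of that rule; the enum values are taken from the diff, everything else (class name, plain-enum shape) is illustrative:

```java
import java.util.Set;

class ValidationModeSketch {

    enum BomValidationMode { ENABLED, DISABLED, ENABLED_FOR_TAGS, DISABLED_FOR_TAGS }

    // Condensed restatement of the decision visible in the hunk above.
    static boolean shouldValidate(BomValidationMode mode, Set<String> projectTags, Set<String> configuredTags) {
        return switch (mode) {
            case ENABLED -> true;
            case DISABLED -> false;
            case ENABLED_FOR_TAGS -> projectTags.stream().anyMatch(configuredTags::contains);
            case DISABLED_FOR_TAGS -> projectTags.stream().noneMatch(configuredTags::contains);
        };
    }

    public static void main(String[] args) {
        Set<String> configured = Set.of("validate-me");
        System.out.println(shouldValidate(BomValidationMode.ENABLED_FOR_TAGS, Set.of("validate-me"), configured));  // true
        System.out.println(shouldValidate(BomValidationMode.DISABLED_FOR_TAGS, Set.of("validate-me"), configured)); // false
    }
}
```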
diff --git a/src/main/java/org/dependencytrack/resources/v1/VexResource.java b/src/main/java/org/dependencytrack/resources/v1/VexResource.java
index 1ce36f01b..31402b54d 100644
--- a/src/main/java/org/dependencytrack/resources/v1/VexResource.java
+++ b/src/main/java/org/dependencytrack/resources/v1/VexResource.java
@@ -260,7 +260,7 @@ private Response process(QueryManager qm, Project project, String encodedVexData
             return Response.status(Response.Status.FORBIDDEN).entity("Access to the specified project is forbidden").build();
         }
         final byte[] decoded = Base64.getDecoder().decode(encodedVexData);
-        BomResource.validate(decoded, project);
+        BomResource.validate(qm, decoded, project);
         final VexUploadEvent vexUploadEvent = new VexUploadEvent(project.getUuid(), decoded);
         Event.dispatch(vexUploadEvent);
         return Response.ok(Collections.singletonMap("token", vexUploadEvent.getChainIdentifier())).build();
@@ -281,7 +281,7 @@ private Response process(QueryManager qm, Project project, List<FormDataBodyPart>
-        BomResource.validate(decoded, project);
+        BomResource.validate(qm, decoded, project);
diff --git a/src/main/java/org/dependencytrack/storage/LocalBomUploadStorageFactory.java b/src/main/java/org/dependencytrack/storage/LocalBomUploadStorageFactory.java
index 5d840aa48..f4212ae95 100644
--- a/src/main/java/org/dependencytrack/storage/LocalBomUploadStorageFactory.java
+++ b/src/main/java/org/dependencytrack/storage/LocalBomUploadStorageFactory.java
@@ -72,16 +72,17 @@ public BomUploadStorage create() {
         final Path directoryPath = Optional.ofNullable(Config.getInstance().getProperty(CONFIG_DIRECTORY))
                 .map(Paths::get)
-                .orElseGet(() -> {
-                    final Path path = Config.getInstance().getDataDirectorty().toPath().resolve("bom-uploads");
-                    try {
-                        return Files.createDirectories(path);
-                    } catch (IOException e) {
-                        throw new IllegalStateException("""
-                                Failed to create directory for BOM upload storage at %s\
-                                """.formatted(path), e);
-                    }
-                });
+                .orElseGet(() -> Config.getInstance().getDataDirectorty().toPath().resolve("bom-uploads"));
+
+        try {
+            Files.createDirectories(directoryPath);
+        } catch (IOException e) {
+            throw new IllegalStateException("""
+                    Failed to create directory for BOM upload storage at %s\
+                    """.formatted(directoryPath), e);
+        }
 
         final boolean canRead = directoryPath.toFile().canRead();
         final boolean canWrite = directoryPath.toFile().canWrite();
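The storage factory change moves Files.createDirectories out of the orElseGet fallback, so the directory is created regardless of whether it comes from configuration or from the default under the data directory; before, only the default path was ever created. A standalone sketch of the resolve-then-create shape, simplified and not the actual factory:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Optional;

class StorageDirectorySketch {

    // The fallback only picks a path; creation and its error handling now
    // apply to both the configured and the default directory.
    static Path resolveAndCreate(String configuredDir, Path dataDir) {
        Path directoryPath = Optional.ofNullable(configuredDir)
                .map(Path::of)
                .orElseGet(() -> dataDir.resolve("bom-uploads"));
        try {
            Files.createDirectories(directoryPath);
        } catch (IOException e) {
            throw new IllegalStateException(
                    "Failed to create directory for BOM upload storage at " + directoryPath, e);
        }
        return directoryPath;
    }

    public static void main(String[] args) {
        // With no configured directory, the default under the data dir is used and created.
        System.out.println(resolveAndCreate(null, Path.of(System.getProperty("java.io.tmpdir"))));
    }
}
```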
diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties
index ea863e279..9cba6e07c 100644
--- a/src/main/resources/application.properties
+++ b/src/main/resources/application.properties
@@ -703,44 +703,44 @@ dt.kafka.topic.prefix=
 # @category: Kafka
 # @type: integer
 # @required
-alpine.kafka.processor.bom.upload.max.concurrency=-1
+kafka.processor.bom.upload.max.concurrency=-1
 
 # @category: Kafka
 # @type: enum
 # @valid-values: [key, partition, unordered]
 # @required
-alpine.kafka.processor.bom.upload.processing.order=key
+kafka.processor.bom.upload.processing.order=key
 
 # @category: Kafka
 # @type: integer
 # @required
-alpine.kafka.processor.bom.upload.retry.initial.delay.ms=3000
+kafka.processor.bom.upload.retry.initial.delay.ms=3000
 
 # @category: Kafka
 # @type: integer
 # @required
-alpine.kafka.processor.bom.upload.retry.multiplier=2
+kafka.processor.bom.upload.retry.multiplier=2
 
 # @category: Kafka
 # @type: double
 # @required
-alpine.kafka.processor.bom.upload.retry.randomization.factor=0.3
+kafka.processor.bom.upload.retry.randomization.factor=0.3
 
 # @category: Kafka
 # @type: integer
 # @required
-alpine.kafka.processor.bom.upload.retry.max.delay.ms=180000
+kafka.processor.bom.upload.retry.max.delay.ms=180000
 
 # @category: Kafka
 # @type: string
 # @required
-alpine.kafka.processor.bom.upload.consumer.group.id=dtrack-apiserver-processor
+kafka.processor.bom.upload.consumer.group.id=dtrack-apiserver-processor
 
 # @category: Kafka
 # @type: enum
 # @valid-values: [earliest, latest, none]
 # @required
-alpine.kafka.processor.bom.upload.consumer.auto.offset.reset=earliest
+kafka.processor.bom.upload.consumer.auto.offset.reset=earliest
 
 # Defines the maximum number of threads to process records from the `dtrack.vulnerability` topic.
 #
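Several processor blocks in this file document the same convention: `max.concurrency=-1` means the concurrency should match the partition count of the input topic, and because `processing.order=key` parallelizes by record key, effective concurrency may exceed the number of partitions. A small hypothetical helper to make that rule concrete (not from the codebase):

```java
class ConcurrencySketch {

    // -1 is documented as "match the partition count of the input topic";
    // any other value is taken as-is.
    static int effectiveConcurrency(int configuredMaxConcurrency, int inputTopicPartitions) {
        return configuredMaxConcurrency == -1 ? inputTopicPartitions : configuredMaxConcurrency;
    }

    public static void main(String[] args) {
        System.out.println(effectiveConcurrency(-1, 12)); // 12: matches partition count
        System.out.println(effectiveConcurrency(64, 12)); // 64: keyed parallelism may exceed partitions
    }
}
```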
@@ -789,16 +789,16 @@ kafka.processor.vuln.mirror.consumer.group.id=dtrack-apiserver-processor
 # @required
 kafka.processor.vuln.mirror.consumer.auto.offset.reset=earliest
 
-# @category: Kafka
-# @type: integer
-# @required
-kafka.processor.epss.mirror.max.concurrency=-1
-
 # Defines the maximum number of threads to process records from the `dtrack.epss` topic.
 #
 # A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
 # Parallelization is based on record keys. Concurrency can thus be higher than the number of partitions.
 #
+# @category: Kafka
+# @type: integer
+# @required
+kafka.processor.epss.mirror.max.concurrency=-1
+
 # @category: Kafka
 # @type: enum
 # @valid-values: [key, partition, unordered]
@@ -841,16 +841,16 @@ kafka.processor.epss.mirror.consumer.auto.offset.reset=earliest
 # @required
 kafka.processor.epss.mirror.max.batch.size=500
 
-# @category: Kafka
-# @type: integer
-# @required
-kafka.processor.repo.meta.analysis.result.max.concurrency=-1
-
 # Defines the maximum number of threads to process records from the `dtrack.repo-meta-analysis.result` topic.
 #
 # A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
 # Parallelization is based on record keys. Concurrency can thus be higher than the number of partitions.
 #
+# @category: Kafka
+# @type: integer
+# @required
+kafka.processor.repo.meta.analysis.result.max.concurrency=-1
+
 # @category: Kafka
 # @type: enum
 # @valid-values: [key, partition, unordered]
@@ -888,16 +888,16 @@ kafka.processor.repo.meta.analysis.result.consumer.group.id=dtrack-apiserver-pro
 # @required
 kafka.processor.repo.meta.analysis.result.consumer.auto.offset.reset=earliest
 
-# @category: Kafka
-# @type: integer
-# @required
-kafka.processor.vuln.scan.result.max.concurrency=-1
-
 # Defines the maximum number of threads to process records from the `dtrack.vuln-analysis.result` topic.
 #
 # A value of `-1` will cause the concurrency to match the number of partitions in the input topic.
 # Parallelization is based on record keys. Concurrency can thus be higher than the number of partitions.
 #
+# @category: Kafka
+# @type: integer
+# @required
+kafka.processor.vuln.scan.result.max.concurrency=-1
+
 # @category: Kafka
 # @type: enum
 # @valid-values: [key, partition, unordered]
@@ -1614,10 +1614,10 @@ task.workflow.maintenance.lock.min.duration=PT1M
 # @required
 bom.upload.storage.compression.level=3
 
-# Defines for how long uploaded BOMs will be retained.
+# Defines for how long uploaded BOM files will be retained.
 #
-# Uploaded BOMs will be deleted from storage after successful processing.
-# A BOM upload will remain in storage if either deleting the BOM, or processing it failed.
+# Uploaded BOM files will be deleted from storage after successful processing.
+# A BOM upload will remain in storage if either deleting the BOM file, or processing it failed.
 # Retention is thus only relevant for BOMs that either:
 #   - failed to be processed, or
 #   - failed to be deleted after being processed
@@ -1633,8 +1633,8 @@ bom.upload.storage.compression.level=3
 bom.upload.storage.retention.duration=PT3H
 
 # Defines the BOM upload storage extension to use.
-# When null, an enabled extension will be chosen based on its priority.
-# It is recommended to explicitly configure a extension for predictable behavior.
+# When not set, an enabled extension will be chosen based on its priority.
+# It is recommended to explicitly configure an extension for predictable behavior.
 #
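The description above says that when no extension is pinned, an enabled extension is chosen by its priority. A sketch of what such a selection might look like; the Extension record, the "memory" name, and lowest-value-wins ordering are assumptions for illustration (only `local` is confirmed by this patch):

```java
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

class ExtensionSelectionSketch {

    record Extension(String name, boolean enabled, int priority) {}

    // Prefer an explicitly configured extension; otherwise fall back to the
    // enabled extension with the best priority (lowest value wins here, an
    // assumption not confirmed by the patch).
    static Optional<Extension> select(String configuredName, List<Extension> extensions) {
        if (configuredName != null) {
            return extensions.stream()
                    .filter(ext -> ext.name().equals(configuredName) && ext.enabled())
                    .findFirst();
        }
        return extensions.stream()
                .filter(Extension::enabled)
                .min(Comparator.comparingInt(Extension::priority));
    }

    public static void main(String[] args) {
        List<Extension> available = List.of(
                new Extension("local", false, 20),
                new Extension("memory", true, 10));
        System.out.println(select(null, available)); // picks the enabled "memory" extension
    }
}
```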
@@ -1678,7 +1678,7 @@ bom.upload.storage.extension.local.enabled=false
 # Has no effect unless bom.upload.storage.extension.local.enabled is `true`.
 #
 # @category: Storage
-# @default: ${java.io.tmpdir}/bom-uploads
+# @default: ${alpine.data.directory}/bom-uploads
 # @type: string
 #
 bom.upload.storage.extension.local.directory=
diff --git a/src/test/java/org/dependencytrack/event/kafka/processor/BomUploadProcessorTest.java b/src/test/java/org/dependencytrack/event/kafka/processor/BomUploadProcessorTest.java
index ec4bacf5a..8853aa9d1 100644
--- a/src/test/java/org/dependencytrack/event/kafka/processor/BomUploadProcessorTest.java
+++ b/src/test/java/org/dependencytrack/event/kafka/processor/BomUploadProcessorTest.java
@@ -1406,15 +1406,15 @@ public void informIssue3957Test() throws Exception {
 
     @Test
     public void informIssue3936Test() throws Exception{
-
         final Project project = qm.createProject("Acme Example", null, "1.0", null, null, null, true, false);
         qm.persist(project);
         List<String> boms = new ArrayList<>(Arrays.asList("bom-issue3936-authors.json", "bom-issue3936-author.json", "bom-issue3936-both.json"));
         int i=0;
         for(String bom : boms){
-            final var bomUploadEvent = new BomUploadEvent(qm.detach(Project.class, project.getId()), createTempBomFile(bom));
-            qm.createWorkflowSteps(bomUploadEvent.getChainIdentifier());
-            new BomUploadProcessingTask().inform(bomUploadEvent);
+            final var token = UUID.randomUUID();
+            final BomUploadedEvent bomUploadedEvent = createEvent(token, project, bom);
+            qm.createWorkflowSteps(token);
+            new BomUploadProcessor().process(aConsumerRecord(project.getUuid(), bomUploadedEvent).build());
             assertBomProcessedNotification();
             qm.getPersistenceManager().evictAll();
             assertThat(qm.getAllComponents(project)).isNotEmpty();
@@ -1479,9 +1479,10 @@ public void informWithExistingDuplicateComponentPropertiesAndBomWithDuplicateCom
             ]
         }
         """.getBytes(StandardCharsets.UTF_8);
-        final var bomUploadEvent = new BomUploadEvent(qm.detach(Project.class, project.getId()), createTempBomFile(bomBytes));
-        qm.createWorkflowSteps(bomUploadEvent.getChainIdentifier());
-        new BomUploadProcessingTask().inform(bomUploadEvent);
+        final var token = UUID.randomUUID();
+        final BomUploadedEvent bomUploadedEvent = createEvent(token, project, bomBytes);
+        qm.createWorkflowSteps(token);
+        new BomUploadProcessor().process(aConsumerRecord(project.getUuid(), bomUploadedEvent).build());
         assertBomProcessedNotification();
 
         qm.getPersistenceManager().evictAll();
@@ -1523,9 +1524,10 @@ public void informWithEmptyComponentAndServiceNameTest() throws Exception {
         }
         """.getBytes(StandardCharsets.UTF_8);
 
-        final var bomUploadEvent = new BomUploadEvent(qm.detach(Project.class, project.getId()), createTempBomFile(bomBytes));
-        qm.createWorkflowSteps(bomUploadEvent.getChainIdentifier());
-        new BomUploadProcessingTask().inform(bomUploadEvent);
+        final var token = UUID.randomUUID();
+        final BomUploadedEvent bomUploadedEvent = createEvent(token, project, bomBytes);
+        qm.createWorkflowSteps(token);
+        new BomUploadProcessor().process(aConsumerRecord(project.getUuid(), bomUploadedEvent).build());
         assertBomProcessedNotification();
 
         qm.getPersistenceManager().evictAll();
diff --git a/src/test/java/org/dependencytrack/resources/v1/ConfigPropertyResourceTest.java b/src/test/java/org/dependencytrack/resources/v1/ConfigPropertyResourceTest.java
index efc8c085f..127ae6148 100644
--- a/src/test/java/org/dependencytrack/resources/v1/ConfigPropertyResourceTest.java
+++ b/src/test/java/org/dependencytrack/resources/v1/ConfigPropertyResourceTest.java
@@ -37,6 +37,7 @@
 
 import java.util.Arrays;
 
+import static net.javacrumbs.jsonunit.assertj.JsonAssertions.assertThatJson;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.dependencytrack.model.ConfigPropertyConstants.CUSTOM_RISK_SCORE_CRITICAL;
 import static org.dependencytrack.model.ConfigPropertyConstants.CUSTOM_RISK_SCORE_HIGH;
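The only change visible here for ConfigPropertyResourceTest is the new static import of JsonUnit's assertThatJson; the tests that use it fall outside this hunk. A minimal usage sketch, assuming the json-unit-assertj dependency is on the classpath:

```java
import static net.javacrumbs.jsonunit.assertj.JsonAssertions.assertThatJson;

class JsonAssertionSketch {

    public static void main(String[] args) {
        // JsonUnit compares JSON structurally, so key order and whitespace
        // differences do not cause the assertion to fail.
        assertThatJson("{\"groupName\":\"general\",\"propertyName\":\"example\"}")
                .isEqualTo("{\"propertyName\":\"example\",\"groupName\":\"general\"}");
    }
}
```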