From 94d0621bd9734109a1f63d41504664a0fd24a449 Mon Sep 17 00:00:00 2001 From: Niklas Date: Tue, 16 Apr 2024 16:14:42 +0200 Subject: [PATCH] Enrich `application.properties` with annotations (#633) --- .github/workflows/config-docs-update.yml | 42 + src/main/resources/application.properties | 888 +++++++++++++++++----- 2 files changed, 743 insertions(+), 187 deletions(-) create mode 100644 .github/workflows/config-docs-update.yml diff --git a/.github/workflows/config-docs-update.yml b/.github/workflows/config-docs-update.yml new file mode 100644 index 000000000..e6b274e71 --- /dev/null +++ b/.github/workflows/config-docs-update.yml @@ -0,0 +1,42 @@ +# This file is part of Dependency-Track. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) OWASP Foundation. All Rights Reserved. +name: Update Config Documentation + +on: + push: + branches: + - main + paths: + - src/main/resources/application.properties + +permissions: { } + +jobs: + trigger-update: + name: Trigger Update + runs-on: ubuntu-latest + timeout-minutes: 5 + if: "${{ github.repository_owner == 'DependencyTrack' }}" + steps: + - name: Dispatch Update Event + env: + # NB: The default GITHUB_TOKEN is scoped to the repository where + # the workflow is running, thus can't trigger workflows in other + # repos. Requires a PAT with `write` role to the target instead. + GH_TOKEN: "${{ secrets.BOT_RELEASE_TOKEN }}" + run: |- + gh workflow run update-config-docs.yml -R DependencyTrack/hyades \ No newline at end of file diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties index d9e2b1e70..b2eafe2e8 100644 --- a/src/main/resources/application.properties +++ b/src/main/resources/application.properties @@ -1,6 +1,3 @@ -############################ Alpine Configuration ########################### - -# Required # Defines the number of worker threads that the event subsystem will consume. # Events occur asynchronously and are processed by the Event subsystem. This # value should be large enough to handle most production situations without @@ -8,100 +5,149 @@ # already resource-constrained server. # A value of 0 will instruct Alpine to allocate 1 thread per CPU core. This # can further be tweaked using the alpine.worker.thread.multiplier property. -# Default value is 0. +# +# @category: Task Execution +# @type: integer +# @required alpine.worker.threads=0 -# Required # Defines a multiplier that is used to calculate the number of threads used # by the event subsystem. This property is only used when alpine.worker.threads # is set to 0. A machine with 4 cores and a multiplier of 4, will use (at most) -# 16 worker threads. Default value is 4. +# 16 worker threads. +# +# @category: Task Execution +# @type: integer +# @required alpine.worker.thread.multiplier=4 -# Required # Defines the path to the data directory. This directory will hold logs, # keys, and any database or index files along with application-specific # files or directories. 
+# +# @category: General +# @type: string +# @required alpine.data.directory=~/.dependency-track -# Optional # Defines the path to the secret key to be used for data encryption and decryption. # The key will be generated upon first startup if it does not exist. -# Default is "/keys/secret.key". -# alpine.secret.key.path=/var/run/secrets/secret.key +# +# @category: General +# @default: ${alpine.data.directory}/keys/secret.key +# @type: string +# alpine.secret.key.path= -# Optional # Defines the paths to the public-private key pair to be used for signing and verifying digital signatures. # The keys will be generated upon first startup if they do not exist. -# Defaults are "/keys/private.key" and "/keys/public.key". -# alpine.private.key.path=/var/run/secrets/private.key -# alpine.public.key.path=/var/run/secrets/public.key +# +# @category: General +# @default: ${alpine.data.directory}/keys/private.key +# @example: /var/run/secrets/private.key +# @type: string +# alpine.private.key.path= + +# Defines the paths to the public-private key pair to be used for signing and verifying digital signatures. +# The keys will be generated upon first startup if they do not exist. +# +# @category: General +# @default: ${alpine.data.directory}/keys/public.key +# @example: /var/run/secrets/public.key +# @type: string +# alpine.public.key.path= -# Optional # Defines the prefix to be used for API keys. A maximum prefix length of 251 -# characters is supported. -# The prefix may also be left empty. +# characters is supported. The prefix may also be left empty. +# +# @category: General +# @type: string alpine.api.key.prefix=odt_ -# Required -# Defines the interval (in seconds) to log general heath information. +# Defines the interval (in seconds) to log general health information. # If value equals 0, watchdog logging will be disabled. +# +# @hidden alpine.watchdog.logging.interval=0 -# Required # Defines the database mode of operation. Valid choices are: # 'server', 'embedded', and 'external'. # In server mode, the database will listen for connections from remote # hosts. In embedded mode, the system will be more secure and slightly # faster. External mode should be used when utilizing an external # database server (i.e. mysql, postgresql, etc). +# +# @hidden alpine.database.mode=external -# Optional # Defines the TCP port to use when the database.mode is set to 'server'. +# +# @category: Database +# @type: string +# @hidden alpine.database.port=9092 -# Required # Specifies the JDBC URL to use when connecting to the database. -#alpine.database.url=jdbc:postgresql://localhost:5432/dtrack +# +# @category: Database +# @example: jdbc:postgresql://localhost:5432/dtrack +# @type: string +# @required +alpine.database.url= -# Required # Specifies the JDBC driver class to use. +# +# @category: Database +# @type: string +# @hidden alpine.database.driver=org.postgresql.Driver -# Optional -# Specifies the path (including filename) to where the JDBC driver is located. -# alpine.database.driver.path=/path/to/dbdriver.jar - -# Optional # Specifies the username to use when authenticating to the database. -#alpine.database.username=dtrack +# +# @category: Database +# @type: string +alpine.database.username=dtrack -# Optional # Specifies the password to use when authenticating to the database. 
-#alpine.database.password=dtrack +# +# @category: Database +# @type: string +alpine.database.password=dtrack -# Optional # Specifies if database migrations should be performed automatically on startup, based on # the defined object model of the application. This MUST be disabled as Liquibase is used # for schema migrations. +# +# @category: Database +# @type: boolean +# @hidden alpine.database.migration.enabled=false -# Optional # Specifies if the database connection pool is enabled. +# +# @category: Database +# @type: boolean alpine.database.pool.enabled=true -# Optional # This property controls the maximum size that the pool is allowed to reach, # including both idle and in-use connections. # The property can be set globally for both transactional and non-transactional # connection pools, or for each pool type separately. When both global and pool-specific # properties are set, the pool-specific properties take precedence. +# +# @category: Database +# @type: integer alpine.database.pool.max.size=20 + +# @category: Database +# @default: ${alpine.database.pool.max.size} +# @type: integer # alpine.database.pool.tx.max.size= + +# @category: Database +# @default: ${alpine.database.pool.max.size} +# @type: integer # alpine.database.pool.nontx.max.size= -# Optional # This property controls the minimum number of idle connections in the pool. # This value should be equal to or less than alpine.database.pool.max.size. # Warning: If the value is less than alpine.database.pool.max.size, @@ -109,32 +155,62 @@ alpine.database.pool.max.size=20 # The property can be set globally for both transactional and non-transactional # connection pools, or for each pool type separately. When both global and pool-specific # properties are set, the pool-specific properties take precedence. +# +# @category: Database +# @type: integer alpine.database.pool.min.idle=10 + +# @category: Database +# @default: ${alpine.database.pool.min.idle} +# @type: integer # alpine.database.pool.tx.min.idle= + +# @category: Database +# @default: ${alpine.database.pool.min.idle} +# @type: integer # alpine.database.pool.nontx.min.idle= -# Optional # This property controls the maximum amount of time that a connection is # allowed to sit idle in the pool. # The property can be set globally for both transactional and non-transactional # connection pools, or for each pool type separately. When both global and pool-specific # properties are set, the pool-specific properties take precedence. +# +# @category: Database +# @type: integer alpine.database.pool.idle.timeout=300000 + +# @category: Database +# @default: ${alpine.database.pool.idle.timeout} +# @type: integer # alpine.database.pool.tx.idle.timeout= + +# @category: Database +# @default: ${alpine.database.pool.idle.timeout} +# @type: integer # alpine.database.pool.nontx.idle.timeout= -# Optional # This property controls the maximum lifetime of a connection in the pool. # An in-use connection will never be retired, only when it is closed will # it then be removed. # The property can be set globally for both transactional and non-transactional # connection pools, or for each pool type separately. When both global and pool-specific # properties are set, the pool-specific properties take precedence. 
+# +# @category: Database +# @type: integer alpine.database.pool.max.lifetime=600000 + +# @category: Database +# @default: ${alpine.database.pool.max.lifetime} +# @type: integer # alpine.database.pool.tx.max.lifetime= + +# @category: Database +# @default: ${alpine.database.pool.max.lifetime} +# @type: integer # alpine.database.pool.nontx.max.lifetime= -# Optional # Controls the 2nd level cache type used by DataNucleus, the Object Relational Mapper (ORM). # See https://www.datanucleus.org/products/accessplatform_6_0/jdo/persistence.html#cache_level2 # Values supported by Dependency-Track are "soft" (default), "weak", and "none". @@ -144,136 +220,191 @@ alpine.database.pool.max.lifetime=600000 # Size of the cache may be monitored through the "datanucleus_cache_second_level_entries" metric, # refer to https://docs.dependencytrack.org/getting-started/monitoring/#metrics for details. # -# DO NOT CHANGE UNLESS THERE IS A GOOD REASON TO. +# @category: Database +# @type: enum +# @valid-values: [soft, weak, none] +# @hidden alpine.datanucleus.cache.level2.type=none -# Optional # Defines whether database migrations should be executed on startup. +# +# @category: Database +# @type: boolean run.migrations=true -# Optional # Defines the database JDBC URL to use when executing migrations. # If not set, the value of alpine.database.url will be used. # Should generally not be set, unless TLS authentication is used, # and custom connection variables are required. +# +# @category: Database +# @default: ${alpine.database.url} +# @type: string # database.migration.url= -# Optional # Defines the database user for executing migrations. # If not set, the value of alpine.database.username will be used. +# +# @category: Database +# @default: ${alpine.database.username} +# @type: string # database.migration.username= -# Optional # Defines the database password for executing migrations. # If not set, the value of alpine.database.password will be used. +# +# @category: Database +# @default: ${alpine.database.password} +# @type: string # database.migration.password= -# Required -# Specifies the number of bcrypt rounds to use when hashing a users password. +# Specifies the number of bcrypt rounds to use when hashing a user's password. # The higher the number the more secure the password, at the expense of # hardware resources and additional time to generate the hash. +# +# @category: General +# @type: integer +# @required alpine.bcrypt.rounds=14 -# Required # Defines if LDAP will be used for user authentication. If enabled, -# alpine.ldap.* properties should be set accordingly. +# `alpine.ldap.*` properties should be set accordingly. +# +# @category: LDAP +# @type: boolean alpine.ldap.enabled=false -# Optional -# Specifies the LDAP server URL -# Example (Microsoft Active Directory): -# alpine.ldap.server.url=ldap://ldap.example.com:3268 -# alpine.ldap.server.url=ldaps://ldap.example.com:3269 -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.server.url=ldap://ldap.example.com:389 -# alpine.ldap.server.url=ldaps://ldap.example.com:636 -alpine.ldap.server.url=ldap://ldap.example.com:389 +# Specifies the LDAP server URL. +#
+# Examples (Microsoft Active Directory):
+#   - ldap://ldap.example.com:3268
+#   - ldaps://ldap.example.com:3269
+# Examples (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc):
+#   - ldap://ldap.example.com:389
+#   - ldaps://ldap.example.com:636
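+#
+# Note: 389/636 are the standard LDAP/LDAPS ports, while 3268/3269 are the
+# Active Directory global catalog ports; which pair applies depends on the
+# directory deployment. The host names above are illustrative only.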
+# +# @category: LDAP +# @type: string +alpine.ldap.server.url= -# Optional # Specifies the base DN that all queries should search from -alpine.ldap.basedn=dc=example,dc=com +# +# @category: LDAP +# @example: dc=example,dc=com +# @type: string +alpine.ldap.basedn= -# Optional # Specifies the LDAP security authentication level to use. Its value is one of # the following strings: "none", "simple", "strong". If this property is empty # or unspecified, the behaviour is determined by the service provider. +# +# @category: LDAP +# @type: enum +# @valid-values: [none, simple, strong] alpine.ldap.security.auth=simple -# Optional # If anonymous access is not permitted, specify a username with limited access # to the directory, just enough to perform searches. This should be the fully # qualified DN of the user. +# +# @category: LDAP +# @type: string alpine.ldap.bind.username= -# Optional # If anonymous access is not permitted, specify a password for the username # used to bind. +# +# @category: LDAP +# @type: string alpine.ldap.bind.password= -# Optional # Specifies if the username entered during login needs to be formatted prior # to asserting credentials against the directory. For Active Directory, the # userPrincipal attribute typically ends with the domain, whereas the # samAccountName attribute and other directory server implementations do not. -# The %s variable will be substitued with the username asserted during login. +# The %s variable will be substituted with the username asserted during login. +#
# Example (Microsoft Active Directory):
-# alpine.ldap.auth.username.format=%s@example.com
+#   - %s@example.com
# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc):
-# alpine.ldap.auth.username.format=%s
-alpine.ldap.auth.username.format=%s@example.com
+#   - %s
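+#
+# For illustration, assuming a (hypothetical) user jdoe: with a format of
+# %s@example.com, credentials are asserted as jdoe@example.com; with a
+# format of %s, they are asserted as jdoe unchanged.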
+#
+# @category: LDAP
+# @example: %s@example.com
+# @type: string
+alpine.ldap.auth.username.format=

-# Optional
-# Specifies the Attribute that identifies a users ID
+# Specifies the attribute that identifies a user's ID.
+#
# Example (Microsoft Active Directory):
-# alpine.ldap.attribute.name=userPrincipalName
+#   - userPrincipalName
# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc):
-# alpine.ldap.attribute.name=uid
+#   - uid
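+#
+# The chosen attribute should hold the value users authenticate with (after
+# alpine.ldap.auth.username.format has been applied); userPrincipalName
+# values in Active Directory typically have the user@domain form.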
+# +# @category: LDAP +# @type: string alpine.ldap.attribute.name=userPrincipalName -# Optional # Specifies the LDAP attribute used to store a users email address +# +# @category: LDAP +# @type: string alpine.ldap.attribute.mail=mail -# Optional -# Specifies the LDAP search filter used to retrieve all groups from the -# directory. +# Specifies the LDAP search filter used to retrieve all groups from the directory. +#
# Example (Microsoft Active Directory):
-# alpine.ldap.groups.filter=(&(objectClass=group)(objectCategory=Group))
+#   - (&(objectClass=group)(objectCategory=Group))
# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc):
-# alpine.ldap.groups.filter=(&(objectClass=groupOfUniqueNames))
+#   - (&(objectClass=groupOfUniqueNames))
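+#
+# Groups matched by this filter are the ones that can be mapped to teams
+# when alpine.ldap.team.synchronization is enabled.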
+# +# @category: LDAP +# @type: string alpine.ldap.groups.filter=(&(objectClass=group)(objectCategory=Group)) -# Optional # Specifies the LDAP search filter to use to query a user and retrieve a list -# of groups the user is a member of. The {USER_DN} variable will be substituted +# of groups the user is a member of. The `{USER_DN}` variable will be substituted # with the actual value of the users DN at runtime. +#
# Example (Microsoft Active Directory):
-# alpine.ldap.user.groups.filter=(&(objectClass=group)(objectCategory=Group)(member={USER_DN}))
+#   - (&(objectClass=group)(objectCategory=Group)(member={USER_DN}))
# Example (Microsoft Active Directory - with nested group support):
-# alpine.ldap.user.groups.filter=(member:1.2.840.113556.1.4.1941:={USER_DN})
+#   - (member:1.2.840.113556.1.4.1941:={USER_DN})
# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc):
-# alpine.ldap.user.groups.filter=(&(objectClass=groupOfUniqueNames)(uniqueMember={USER_DN}))
+#   - (&(objectClass=groupOfUniqueNames)(uniqueMember={USER_DN}))
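+#
+# For illustration, assuming a (hypothetical) user DN of
+# cn=jdoe,ou=people,dc=example,dc=com, the nested-group filter above expands
+# at runtime to (member:1.2.840.113556.1.4.1941:=cn=jdoe,ou=people,dc=example,dc=com).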
+# +# @category: LDAP +# @type: string alpine.ldap.user.groups.filter=(member:1.2.840.113556.1.4.1941:={USER_DN}) -# Optional # Specifies the LDAP search filter used to search for groups by their name. -# The {SEARCH_TERM} variable will be substituted at runtime. +# The `{SEARCH_TERM}` variable will be substituted at runtime. +#
# Example (Microsoft Active Directory):
-# alpine.ldap.groups.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*))
+#   - (&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*))
# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc):
-# alpine.ldap.groups.search.filter=(&(objectClass=groupOfUniqueNames)(cn=*{SEARCH_TERM}*))
+#   - (&(objectClass=groupOfUniqueNames)(cn=*{SEARCH_TERM}*))
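+#
+# For illustration, a search term of "dev" expands the Active Directory
+# filter above to (&(objectClass=group)(objectCategory=Group)(cn=*dev*)).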
+# +# @category: LDAP +# @type: string alpine.ldap.groups.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) -# Optional # Specifies the LDAP search filter used to search for users by their name. -# The {SEARCH_TERM} variable will be substituted at runtime. +# The {SEARCH_TERM} variable will be substituted at runtime. +#
# Example (Microsoft Active Directory):
-# alpine.ldap.users.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*))
+#   - (&(objectClass=user)(objectCategory=Person)(cn=*{SEARCH_TERM}*))
# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc):
-# alpine.ldap.users.search.filter=(&(objectClass=inetOrgPerson)(cn=*{SEARCH_TERM}*))
+#   - (&(objectClass=inetOrgPerson)(cn=*{SEARCH_TERM}*))
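+#
+# The substitution works as for the group search filter above, e.g. a search
+# term of "doe" yields (&(objectClass=user)(objectCategory=Person)(cn=*doe*)).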
+# +# @category: LDAP +# @type: string alpine.ldap.users.search.filter=(&(objectClass=user)(objectCategory=Person)(cn=*{SEARCH_TERM}*)) -# Optional # Specifies if mapped LDAP accounts are automatically created upon successful # authentication. When a user logs in with valid credentials but an account has # not been previously provisioned, an authentication failure will be returned. @@ -281,9 +412,11 @@ alpine.ldap.users.search.filter=(&(objectClass=user)(objectCategory=Person)(cn=* # system and which users cannot. When this value is set to true, a local ldap # user will be created and mapped to the ldap account automatically. This # automatic provisioning only affects authentication, not authorization. +# +# @category: LDAP +# @type: boolean alpine.ldap.user.provisioning=false -# Optional # This option will ensure that team memberships for LDAP users are dynamic and # synchronized with membership of LDAP groups. When a team is mapped to an LDAP # group, all local LDAP users will automatically be assigned to the team if @@ -291,77 +424,168 @@ alpine.ldap.user.provisioning=false # removed from the LDAP group, they will also be removed from the team. This # option provides the ability to dynamically control user permissions via an # external directory. +# +# @category: LDAP +# @type: boolean alpine.ldap.team.synchronization=false -# Optional -# HTTP proxy. If the address is set, then the port must be set too. -# alpine.http.proxy.address=proxy.example.com -# alpine.http.proxy.port=8888 +# HTTP proxy address. If set, then alpine.http.proxy.port must be set too. +# +# @category: HTTP +# @example: proxy.example.com +# @type: string +# alpine.http.proxy.address= + +# @category: HTTP +# @example: 8888 +# @type: integer +# alpine.http.proxy.port= + +# @category: HTTP +# @type: string # alpine.http.proxy.username= + +# @category: HTTP +# @type: string # alpine.http.proxy.password= -# alpine.no.proxy=localhost,127.0.0.1 -# Optional -# HTTP Outbound Connection Timeout Settings. All values are in seconds. +# @category: HTTP +# @example: localhost,127.0.0.1 +# @type: string +# alpine.no.proxy= + +# Defines the connection timeout in seconds for outbound HTTP connections. +# +# @category: HTTP +# @type: integer # alpine.http.timeout.connection=30 + +# Defines the socket / read timeout in seconds for outbound HTTP connections. +# +# @category: HTTP +# @type: integer # alpine.http.timeout.socket=30 + +# Defines the request timeout in seconds for outbound HTTP connections. +# +# @category: HTTP +# @type: integer # alpine.http.timeout.pool=60 -# Optional -# Cross-Origin Resource Sharing (CORS) headers to include in REST responses. -# If 'alpine.cors.enabled' is true, CORS headers will be sent, if false, no -# CORS headers will be sent. -# See Also: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS -# The following are default values +# Defines whether [Cross Origin Resource Sharing](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) +# (CORS) headers shall be included in REST API responses. +# +# @category: CORS +# @type: boolean # alpine.cors.enabled=true + +# Controls the content of the `Access-Control-Allow-Origin` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: string # alpine.cors.allow.origin=* + +# Controls the content of the `Access-Control-Allow-Methods` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: string # alpine.cors.allow.methods=GET POST PUT DELETE OPTIONS + +# Controls the content of the `Access-Control-Allow-Headers` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: string # alpine.cors.allow.headers=Origin, Content-Type, Authorization, X-Requested-With, Content-Length, Accept, Origin, X-Api-Key, X-Total-Count, * + +# Controls the content of the `Access-Control-Expose-Headers` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: string # alpine.cors.expose.headers=Origin, Content-Type, Authorization, X-Requested-With, Content-Length, Accept, Origin, X-Api-Key, X-Total-Count + +# Controls the content of the `Access-Control-Allow-Credentials` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: boolean # alpine.cors.allow.credentials=true + +# Controls the content of the `Access-Control-Max-Age` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: integer # alpine.cors.max.age=3600 -# Optional # Defines whether Prometheus metrics will be exposed. # If enabled, metrics will be available via the /metrics endpoint. +# +# @category: Observability +# @type: boolean alpine.metrics.enabled=false -# Optional # Defines the username required to access metrics. # Has no effect when alpine.metrics.auth.password is not set. +# +# @category: Observability +# @type: string alpine.metrics.auth.username= -# Optional # Defines the password required to access metrics. # Has no effect when alpine.metrics.auth.username is not set. +# +# @category: Observability +# @type: string alpine.metrics.auth.password= -# Required # Defines if OpenID Connect will be used for user authentication. -# If enabled, alpine.oidc.* properties should be set accordingly. +# If enabled, `alpine.oidc.*` properties should be set accordingly. +# +# @category: OpenID Connect +# @type: boolean alpine.oidc.enabled=false -# Optional # Defines the client ID to be used for OpenID Connect. # The client ID should be the same as the one configured for the frontend, # and will only be used to validate ID tokens. +# +# @category: OpenID Connect +# @type: string alpine.oidc.client.id= -# Optional # Defines the issuer URL to be used for OpenID Connect. -# This issuer MUST support provider configuration via the /.well-known/openid-configuration endpoint. +# This issuer MUST support provider configuration via the `/.well-known/openid-configuration` endpoint. # See also: -# - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata -# - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig +#
+#   - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+#   - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig
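+#
+# For illustration, with a (hypothetical) issuer of https://auth.example.com,
+# the provider configuration is expected to be served at
+# https://auth.example.com/.well-known/openid-configuration.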
+# +# @category: OpenID Connect +# @type: string alpine.oidc.issuer= -# Optional # Defines the name of the claim that contains the username in the provider's userinfo endpoint. -# Common claims are "name", "username", "preferred_username" or "nickname". -# See also: https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse +# Common claims are `name`, `username`, `preferred_username` or `nickname`. +# See also: +#
+#   - https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
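+#
+# For illustration, given a (hypothetical) userinfo response of
+# {"sub": "6a03cbcf", "email": "jdoe@example.com", "preferred_username": "jdoe"},
+# a value of preferred_username makes Dependency-Track use jdoe as the username.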
+# +# @category: OpenID Connect +# @type: string alpine.oidc.username.claim=name -# Optional # Specifies if mapped OpenID Connect accounts are automatically created upon successful # authentication. When a user logs in with a valid access token but an account has # not been previously provisioned, an authentication failure will be returned. @@ -369,9 +593,11 @@ alpine.oidc.username.claim=name # system and which users cannot. When this value is set to true, a local OpenID Connect # user will be created and mapped to the OpenID Connect account automatically. This # automatic provisioning only affects authentication, not authorization. +# +# @category: OpenID Connect +# @type: boolean alpine.oidc.user.provisioning=false -# Optional # This option will ensure that team memberships for OpenID Connect users are dynamic and # synchronized with membership of OpenID Connect groups or assigned roles. When a team is # mapped to an OpenID Connect group, all local OpenID Connect users will automatically be @@ -380,61 +606,88 @@ alpine.oidc.user.provisioning=false # option provides the ability to dynamically control user permissions via the identity provider. # Note that team synchronization is only performed during user provisioning and after successful # authentication. +# +# @category: OpenID Connect +# @type: boolean alpine.oidc.team.synchronization=false -# Optional # Defines the name of the claim that contains group memberships or role assignments in the provider's userinfo endpoint. # The claim must be an array of strings. Most public identity providers do not support group or role management. # When using a customizable / on-demand hosted identity provider, name, content, and inclusion in the userinfo endpoint # will most likely need to be configured. +# +# @category: OpenID Connect +# @type: string alpine.oidc.teams.claim=groups -# Required -kafka.bootstrap.servers=localhost:9092 +# @category: Kafka +# @example: localhost:9092 +# @type: string +# @required +kafka.bootstrap.servers= -# Optional +# @category: Kafka +# @type: enum +# @valid-values: [earliest, latest, none] kafka.auto.offset.reset=earliest -# Optional +# @category: Kafka +# @type: integer +# @required kafka.num.stream.threads=3 -#Optional +# @category: Kafka +# @type: boolean kafka.tls.enabled=false -#Optional +# @category: Kafka +# @type: boolean kafka.mtls.enabled=false -#Optional +# @category: Kafka +# @type: enum +# @valid-values: [PLAINTEXT, SASL_SSL_PLAINTEXT, SASL_PLAINTEXT, SSL] kafka.security.protocol= -#Optional +# @category: Kafka +# @type: string kafka.truststore.path= -#Optional +# @category: Kafka +# @type: string kafka.truststore.password= -#Optional +# @category: Kafka +# @type: string kafka.keystore.path= -#Optional +# @category: Kafka +# @type: string kafka.keystore.password= -# Optional +# @category: Kafka +# @type: string kafka.topic.prefix= -# Required +# @category: Kafka +# @type: string +# @required application.id=dtrack-apiserver -# Optional # Defines the number of deserialization errors deemed to be acceptable in a given time frame. # Until the threshold is reached, records failing deserialization will be logged and skipped. # When the threshold is exceeded, further consumption is stopped. # The interval must be specified in ISO8601 duration notation (https://en.wikipedia.org/wiki/ISO_8601#Durations). # The default threshold is 5 errors per 30min. 
+# +# @category: Kafka +# @type: integer kafka.streams.deserialization.exception.threshold.count=5 + +# @category: Kafka +# @type: duration kafka.streams.deserialization.exception.threshold.interval=PT30M -# Optional # Defines the number of production errors deemed to be acceptable in a given time frame. # Until the threshold is reached, records failing to be produced will be logged and skipped. # When the threshold is exceeded, further production is stopped. @@ -442,20 +695,30 @@ kafka.streams.deserialization.exception.threshold.interval=PT30M # stop of production immediately. # The interval must be specified in ISO8601 duration notation (https://en.wikipedia.org/wiki/ISO_8601#Durations). # The default threshold is 5 errors per 30min. +# +# @category: Kafka +# @type: integer kafka.streams.production.exception.threshold.count=5 + +# @category: Kafka +# @type: duration kafka.streams.production.exception.threshold.interval=PT30M -# Optional # Defines the number of times record processing will be retried in case of unhandled, yet transient errors. # Until the threshold is reached, records fetched since the last successful offset commit will be attempted to be re-processed. # When the threshold is exceeded, further processing is stopped. # Only transient errors will be treated this way; Unexpected or non-transient errors will cause a stop of processing immediately. # The interval must be specified in ISO8601 duration notation (https://en.wikipedia.org/wiki/ISO_8601#Durations). # The default threshold is 50 errors per 30min. +# +# @category: Kafka +# @type: integer kafka.streams.transient.processing.exception.threshold.count=50 + +# @category: Kafka +# @type: duration kafka.streams.transient.processing.exception.threshold.interval=PT30M -# Optional # Defines the order in which records are being processed. # Valid options are: # * partition @@ -463,7 +726,6 @@ kafka.streams.transient.processing.exception.threshold.interval=PT30M # * unordered # alpine.kafka.processor..processing.order=partition -# Optional # Defines the maximum size of record batches being processed. # Batch sizes are further limited by the configured processing order: # * partition: Number of partitions assigned to this processor @@ -472,182 +734,434 @@ kafka.streams.transient.processing.exception.threshold.interval=PT30M # Will be ignored when the processor is not a batch processor. # alpine.kafka.processor..max.batch.size=10 -# Optional # Defines the maximum concurrency with which records are being processed. # For batch processors, a smaller number can improve efficiency and throughput. # A value of -1 indicates that the maximum concurrency should be equal to # the number of partitions in the topic being consumed from. # alpine.kafka.processor..max.concurrency=1 -# Optional # Allows for customization of the processor's retry behavior. # alpine.kafka.processor..retry.initial.delay.ms=1000 # alpine.kafka.processor..retry.multiplier=1 # alpine.kafka.processor..retry.randomization.factor=0.3 # alpine.kafka.processor..retry.max.delay.ms=60000 -# Optional # Allows for customization of the underlying Kafka consumer. # Refer to https://kafka.apache.org/documentation/#consumerconfigs for available options. # alpine.kafka.processor..consumer.= -# Required -# Configures the Kafka processor responsible for ingesting mirrored vulnerability -# data from the dtrack.vulnerability topic. The processor only occasionally receives -# records, such that high concurrency is usually not justified. 
+# @category: Kafka +# @type: integer +# @required alpine.kafka.processor.vuln.mirror.max.concurrency=-1 + +# @category: Kafka +# @type: enum +# @valid-values: [key, partition, unordered] +# @required alpine.kafka.processor.vuln.mirror.processing.order=partition + +# @category: Kafka +# @type: integer +# @required alpine.kafka.processor.vuln.mirror.retry.initial.delay.ms=3000 + +# @category: Kafka +# @type: integer +# @required alpine.kafka.processor.vuln.mirror.retry.multiplier=2 + +# @category: Kafka +# @type: double +# @required alpine.kafka.processor.vuln.mirror.retry.randomization.factor=0.3 + +# @category: Kafka +# @type: integer +# @required alpine.kafka.processor.vuln.mirror.retry.max.delay.ms=180000 + +# @category: Kafka +# @type: string +# @required alpine.kafka.processor.vuln.mirror.consumer.group.id=dtrack-apiserver-processor + +# @category: Kafka +# @type: enum +# @valid-values: [earliest, latest, none] +# @required alpine.kafka.processor.vuln.mirror.consumer.auto.offset.reset=earliest -# Required -# Configures the Kafka processor responsible for ingesting repository metadata -# analysis results from the dtrack.repo-meta-analysis.result topic. +# @category: Kafka +# @type: integer +# @required alpine.kafka.processor.repo.meta.analysis.result.max.concurrency=-1 + +# @category: Kafka +# @type: enum +# @valid-values: [key, partition, unordered] +# @required alpine.kafka.processor.repo.meta.analysis.result.processing.order=key + +# @category: Kafka +# @type: integer +# @required alpine.kafka.processor.repo.meta.analysis.result.retry.initial.delay.ms=1000 + +# @category: Kafka +# @type: integer +# @required alpine.kafka.processor.repo.meta.analysis.result.retry.multiplier=2 + +# @category: Kafka +# @type: double +# @required alpine.kafka.processor.repo.meta.analysis.result.retry.randomization.factor=0.3 + +# @category: Kafka +# @type: integer +# @required alpine.kafka.processor.repo.meta.analysis.result.retry.max.delay.ms=180000 + +# @category: Kafka +# @type: string +# @required alpine.kafka.processor.repo.meta.analysis.result.consumer.group.id=dtrack-apiserver-processor + +# @category: Kafka +# @type: enum +# @valid-values: [earliest, latest, none] +# @required alpine.kafka.processor.repo.meta.analysis.result.consumer.auto.offset.reset=earliest # Scheduling tasks after 3 minutes (3*60*1000) of starting application +# +# @category: Task Scheduling +# @type: integer +# @required task.scheduler.initial.delay=180000 # Cron expressions for tasks have the precision of minutes so polling every minute +# +# @category: Task Scheduling +# @type: integer +# @required task.scheduler.polling.interval=60000 -#specifies how long the lock should be kept in case the executing node dies. -#This is just a fallback, under normal circumstances the lock is released as soon the tasks finishes. -#Set lockAtMostFor to a value which is much longer than normal execution time. Default value is 15min -#Lock will be extended dynamically till task execution is finished +# Specifies how long the lock should be kept in case the executing node dies. +# This is just a fallback, under normal circumstances the lock is released as soon the tasks finishes. +# Set lockAtMostFor to a value which is much longer than normal execution time. Default value is 15min +# Lock will be extended dynamically till task execution is finished +# +# @category: Task Scheduling +# @type: integer +# @required task.metrics.portfolio.lockAtMostForInMillis=900000 -#specifies minimum amount of time for which the lock should be kept. 
+ +# Specifies minimum amount of time for which the lock should be kept. # Its main purpose is to prevent execution from multiple nodes in case of really short tasks and clock difference between the nodes. +# +# @category: Task Scheduling +# @type: integer +# @required task.metrics.portfolio.lockAtLeastForInMillis=90000 + +# @category: Task Scheduling +# @type: integer +# @required task.metrics.vulnerability.lockAtMostForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required task.metrics.vulnerability.lockAtLeastForInMillis=90000 + +# @category: Task Scheduling +# @type: integer +# @required task.mirror.epss.lockAtMostForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required task.mirror.epss.lockAtLeastForInMillis=90000 + +# @category: Task Scheduling +# @type: integer +# @required task.componentIdentification.lockAtMostForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required task.componentIdentification.lockAtLeastForInMillis=90000 + +# @category: Task Scheduling +# @type: integer +# @required task.ldapSync.lockAtMostForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required task.ldapSync.lockAtLeastForInMillis=90000 + +# @category: Task Scheduling +# @type: integer +# @required task.workflow.state.cleanup.lockAtMostForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required task.workflow.state.cleanup.lockAtLeastForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required task.portfolio.repoMetaAnalysis.lockAtMostForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required task.portfolio.repoMetaAnalysis.lockAtLeastForInMillis=90000 + +# @category: Task Scheduling +# @type: integer +# @required task.portfolio.vulnAnalysis.lockAtMostForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required task.portfolio.vulnAnalysis.lockAtLeastForInMillis=90000 + +# @category: Task Scheduling +# @type: integer +# @required integrityMetaInitializer.lockAtMostForInMillis=900000 + +# @category: Task Scheduling +# @type: integer +# @required integrityMetaInitializer.lockAtLeastForInMillis=90000 -#schedule task for 10th minute of every hour +# Schedule task for 10th minute of every hour +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.metrics.portfolio=10 * * * * -#schedule task for 40th minute of every hour + +# Schedule task for 40th minute of every hour +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.metrics.vulnerability=40 * * * * -#schedule task every 24 hrs at 02:00 UTC + +# Schedule task every 24 hrs at 02:00 UTC +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.mirror.github=0 2 * * * -#schedule task every 24 hrs at 03:00 UTC + +# Schedule task every 24 hrs at 03:00 UTC +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.mirror.osv=0 3 * * * -#schedule task every 24 hrs at 04:00 UTC + +# Schedule task every 24 hrs at 04:00 UTC +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.mirror.nist=0 4 * * * -#schedule task every 6 hrs at 25th min + +# Schedule task every 6 hrs at 25th min +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.componentIdentification=25 */6 * * * -#schedule task every 6 hrs at 0th min + +# Schedule task every 6 hrs at 0th min +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.ldapSync=0 */6 * * * -#schedule task every 24 hrs at 01:00 UTC + +# Schedule task 
every 24 hrs at 01:00 UTC +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.repoMetaAnalysis=0 1 * * * -#schedule task every 24hrs at 06:00 UTC + +# Schedule task every 24hrs at 06:00 UTC +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.vulnAnalysis=0 6 * * * -#schedule task at 8:05 UTC on Wednesday every week + +# Schedule task at 8:05 UTC on Wednesday every week +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.vulnScanCleanUp=5 8 * * 4 -#schedule task every 5 minutes +# Schedule task every 5 minutes +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.vulnerability.policy.bundle.fetch=*/5 * * * * -#schedule task every 24 hrs at 02:00 UTC +# Schedule task every 24 hrs at 02:00 UTC +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.fortify.ssc.sync=0 2 * * * -#schedule task every 24 hrs at 02:00 UTC + +# Schedule task every 24 hrs at 02:00 UTC +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.defectdojo.sync=0 2 * * * -#schedule task every 24 hrs at 02:00 UTC + +# Schedule task every 24 hrs at 02:00 UTC +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.kenna.sync=0 2 * * * -#schedule task every 15 minutes + +# Schedule task every 15 minutes +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.workflow.state.cleanup=*/15 * * * * -#schedule task at 0 min past every 12th hr + +# Schedule task at 0 min past every 12th hr +# +# @category: Task Scheduling +# @type: cron +# @required task.cron.integrityInitializer=0 */12 * * * -# Optional # Defines the number of write operations to perform during BOM processing before changes are flushed to the database. # Smaller values may lower memory usage of the API server, whereas higher values will improve performance as fewer # network round-trips to the database are necessary. +# +# @category: General +# @type: integer bom.upload.processing.trx.flush.threshold=10000 -# Optional # Defines the duration for how long a workflow step is allowed to remain in PENDING state # after being started. If this duration is exceeded, workflow steps will transition into the TIMED_OUT state. # If they remain in TIMED_OUT for the same duration, they will transition to the FAILED state. # The duration must be specified in ISO8601 notation (https://en.wikipedia.org/wiki/ISO_8601#Durations). +# +# @category: General +# @type: duration workflow.step.timeout.duration=PT1H -# Optional # Defines the duration for how long workflow data is being retained, after all steps transitioned into a non-terminal # state (CANCELLED, COMPLETED, FAILED, NOT_APPLICABLE). # The duration must be specified in ISO8601 notation (https://en.wikipedia.org/wiki/ISO_8601#Durations). +# +# @category: General +# @type: duration workflow.retention.duration=P3D -# Optional # Delays the BOM_PROCESSED notification until the vulnerability analysis associated with a given BOM upload # is completed. The intention being that it is then "safe" to query the API for any identified vulnerabilities. # This is specifically for cases where polling the /api/v1/bom/token/ endpoint is not feasible. # THIS IS A TEMPORARY FUNCTIONALITY AND MAY BE REMOVED IN FUTURE RELEASES WITHOUT FURTHER NOTICE. +# +# @category: General +# @type: boolean tmp.delay.bom.processed.notification=false -# Optional # Specifies whether the Integrity Initializer shall be enabled. 
+# +# @category: General +# @type: boolean integrity.initializer.enabled=false + +# @category: General +# @type: boolean integrity.check.enabled=false -# Optional # Defines whether vulnerability policy analysis is enabled. +# +# @category: General +# @type: boolean vulnerability.policy.analysis.enabled=false -# Optional # Defines where to fetch the policy bundle from.For S3, just the base url needs to be provided with port # For nginx, the whole url with bundle name needs to be given -vulnerability.policy.bundle.url=http://localhostt:80/bundles/test.zip +# +# @category: General +# @example: http://example.com:80/bundles/bundle.zip +# @type: string +vulnerability.policy.bundle.url= -# Optional # Defines the type of source from which policy bundles are being fetched from. # Required when vulnerability.policy.bundle.url is set. -# Valid options are: -# - NGINX -# - S3 +# +# @category: General +# @type: enum +# @valid-values: [nginx, s3] vulnerability.policy.bundle.source.type=NGINX -#For nginx server, if username and bearer token both are provided, basic auth will be used, +# For nginx server, if username and bearer token both are provided, basic auth will be used, # else the auth header will be added based on the not null values -# Optional # Defines the password to be used for basic authentication against the service hosting the policy bundle. +# +# @category: General +# @type: string vulnerability.policy.bundle.auth.password= -# Optional # Defines the username to be used for basic authentication against the service hosting the policy bundle. +# +# @category: General +# @type: string vulnerability.policy.bundle.auth.username= -# Optional # Defines the token to be used as bearerAuth against the service hosting the policy bundle. +# +# @category: General +# @type: string vulnerability.policy.bundle.bearer.token= -# Optional # S3 related details. Access key, secret key, bucket name and bundle names are mandatory if S3 is chosen. Region is optional +# +# @category: General +# @type: string vulnerability.policy.s3.access.key= + +# @category: General +# @type: string vulnerability.policy.s3.secret.key= + +# @category: General +# @type: string vulnerability.policy.s3.bucket.name= + +# @category: General +# @type: string vulnerability.policy.s3.bundle.name= + +# @category: General +# @type: string vulnerability.policy.s3.region= \ No newline at end of file