diff --git a/CHANGELOG.md b/CHANGELOG.md index 63eb9db9..2a5ad3fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,18 @@ - Fix `KafkaTopic`: fails to create a topic with the replication factor set more than running Kafka nodes - Fix `ServiceIntegration`: sends empty source and destination projects +- Add `Kafka` field `userConfig.follower_fetching`, type `object`: Enable follower fetching +- Change `Kafka` field `userConfig.kafka.sasl_oauthbearer_sub_claim_name`: pattern ~~`^[^\r\n]*$`~~ → + `^[^\r\n]*\S[^\r\n]*$` +- Add `MySQL` field `userConfig.migration.ignore_roles`, type `string`: Comma-separated list of database + roles, which should be ignored during migration (supported by PostgreSQL only at the moment) +- Add `PostgreSQL` field `userConfig.migration.ignore_roles`, type `string`: Comma-separated list of + database roles, which should be ignored during migration (supported by PostgreSQL only at the moment) +- Add `PostgreSQL` field `userConfig.pgbouncer.max_prepared_statements`, type `integer`: PgBouncer tracks + protocol-level named prepared statements related commands sent by the client in transaction and + statement pooling modes when max_prepared_statements is set to a non-zero value +- Add `Redis` field `userConfig.migration.ignore_roles`, type `string`: Comma-separated list of database + roles, which should be ignored during migration (supported by PostgreSQL only at the moment) ## v0.24.0 - 2024-07-16 diff --git a/api/v1alpha1/userconfig/integration/clickhouse_kafka/clickhouse_kafka.go b/api/v1alpha1/userconfig/integration/clickhouse_kafka/clickhouse_kafka.go index a7fc1dc5..fe28ad94 100644 --- a/api/v1alpha1/userconfig/integration/clickhouse_kafka/clickhouse_kafka.go +++ b/api/v1alpha1/userconfig/integration/clickhouse_kafka/clickhouse_kafka.go @@ -76,11 +76,19 @@ type Tables struct { // Maximum amount of messages to be polled in a single Kafka poll PollMaxBatchSize *int `groups:"create,update" json:"poll_max_batch_size,omitempty"` + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=30000 + // Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). + PollMaxTimeoutMs *int `groups:"create,update" json:"poll_max_timeout_ms,omitempty"` + // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=1000000000 // Skip at least this number of broken messages from Kafka topic per block SkipBrokenMessages *int `groups:"create,update" json:"skip_broken_messages,omitempty"` + // Provide an independent thread for each consumer. All consumers run in the same thread by default. 
+ ThreadPerConsumer *bool `groups:"create,update" json:"thread_per_consumer,omitempty"` + // +kubebuilder:validation:MaxItems=100 // Kafka topics Topics []*Topics `groups:"create,update" json:"topics"` diff --git a/api/v1alpha1/userconfig/integration/clickhouse_kafka/zz_generated.deepcopy.go b/api/v1alpha1/userconfig/integration/clickhouse_kafka/zz_generated.deepcopy.go index 78e9862d..e6add784 100644 --- a/api/v1alpha1/userconfig/integration/clickhouse_kafka/zz_generated.deepcopy.go +++ b/api/v1alpha1/userconfig/integration/clickhouse_kafka/zz_generated.deepcopy.go @@ -96,11 +96,21 @@ func (in *Tables) DeepCopyInto(out *Tables) { *out = new(int) **out = **in } + if in.PollMaxTimeoutMs != nil { + in, out := &in.PollMaxTimeoutMs, &out.PollMaxTimeoutMs + *out = new(int) + **out = **in + } if in.SkipBrokenMessages != nil { in, out := &in.SkipBrokenMessages, &out.SkipBrokenMessages *out = new(int) **out = **in } + if in.ThreadPerConsumer != nil { + in, out := &in.ThreadPerConsumer, &out.ThreadPerConsumer + *out = new(bool) + **out = **in + } if in.Topics != nil { in, out := &in.Topics, &out.Topics *out = make([]*Topics, len(*in)) diff --git a/api/v1alpha1/userconfig/service/kafka/kafka.go b/api/v1alpha1/userconfig/service/kafka/kafka.go index 056b4c20..fc6b96c9 100644 --- a/api/v1alpha1/userconfig/service/kafka/kafka.go +++ b/api/v1alpha1/userconfig/service/kafka/kafka.go @@ -3,6 +3,12 @@ package kafkauserconfig +// Enable follower fetching +type FollowerFetching struct { + // Whether to enable the follower fetching functionality + Enabled *bool `groups:"create,update" json:"enabled,omitempty"` +} + // CIDR address block, either as a string, or in a dict with an optional description field type IpFilter struct { // +kubebuilder:validation:MaxLength=1024 @@ -16,211 +22,211 @@ type IpFilter struct { // Kafka broker configuration values type Kafka struct { - // Enable auto creation of topics + // Enable auto-creation of topics. (Default: true) AutoCreateTopicsEnable *bool `groups:"create,update" json:"auto_create_topics_enable,omitempty"` // +kubebuilder:validation:Enum="gzip";"snappy";"lz4";"zstd";"uncompressed";"producer" - // Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer. + // Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer. (Default: producer) CompressionType *string `groups:"create,update" json:"compression_type,omitempty"` // +kubebuilder:validation:Minimum=1000 // +kubebuilder:validation:Maximum=3600000 - // Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. + // Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)) ConnectionsMaxIdleMs *int `groups:"create,update" json:"connections_max_idle_ms,omitempty"` // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=10 - // Replication factor for autocreated topics + // Replication factor for auto-created topics (Default: 3) DefaultReplicationFactor *int `groups:"create,update" json:"default_replication_factor,omitempty"` // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=300000 - // The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + // The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)) GroupInitialRebalanceDelayMs *int `groups:"create,update" json:"group_initial_rebalance_delay_ms,omitempty"` // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=1800000 - // The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + // The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 1800000 ms (30 minutes)) GroupMaxSessionTimeoutMs *int `groups:"create,update" json:"group_max_session_timeout_ms,omitempty"` // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=60000 - // The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + // The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)) GroupMinSessionTimeoutMs *int `groups:"create,update" json:"group_min_session_timeout_ms,omitempty"` // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=315569260000 - // How long are delete records retained? + // How long are delete records retained? (Default: 86400000 (1 day)) LogCleanerDeleteRetentionMs *int `groups:"create,update" json:"log_cleaner_delete_retention_ms,omitempty"` // +kubebuilder:validation:Minimum=30000 - // The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted + // The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)) LogCleanerMaxCompactionLagMs *int `groups:"create,update" json:"log_cleaner_max_compaction_lag_ms,omitempty"` // +kubebuilder:validation:Minimum=0.2 // +kubebuilder:validation:Maximum=0.9 - // Controls log compactor frequency.
Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. + // Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5) LogCleanerMinCleanableRatio *float64 `groups:"create,update" json:"log_cleaner_min_cleanable_ratio,omitempty"` // +kubebuilder:validation:Minimum=0 - // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms) LogCleanerMinCompactionLagMs *int `groups:"create,update" json:"log_cleaner_min_compaction_lag_ms,omitempty"` // +kubebuilder:validation:Enum="delete";"compact";"compact,delete" - // The default cleanup policy for segments beyond the retention window + // The default cleanup policy for segments beyond the retention window (Default: delete) LogCleanupPolicy *string `groups:"create,update" json:"log_cleanup_policy,omitempty"` // +kubebuilder:validation:Minimum=1 - // The number of messages accumulated on a log partition before messages are flushed to disk + // The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)) LogFlushIntervalMessages *int `groups:"create,update" json:"log_flush_interval_messages,omitempty"` // +kubebuilder:validation:Minimum=0 - // The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used + // The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null) LogFlushIntervalMs *int `groups:"create,update" json:"log_flush_interval_ms,omitempty"` // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=104857600 - // The interval with which Kafka adds an entry to the offset index + // The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)) LogIndexIntervalBytes *int `groups:"create,update" json:"log_index_interval_bytes,omitempty"` // +kubebuilder:validation:Minimum=1048576 // +kubebuilder:validation:Maximum=104857600 - // The maximum size in bytes of the offset index + // The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)) LogIndexSizeMaxBytes *int `groups:"create,update" json:"log_index_size_max_bytes,omitempty"` // +kubebuilder:validation:Minimum=-2 - // The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. + // The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. 
(Default: -2) LogLocalRetentionBytes *int `groups:"create,update" json:"log_local_retention_bytes,omitempty"` // +kubebuilder:validation:Minimum=-2 - // The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. + // The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2) LogLocalRetentionMs *int `groups:"create,update" json:"log_local_retention_ms,omitempty"` - // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true) LogMessageDownconversionEnable *bool `groups:"create,update" json:"log_message_downconversion_enable,omitempty"` // +kubebuilder:validation:Minimum=0 - // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message + // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)) LogMessageTimestampDifferenceMaxMs *int `groups:"create,update" json:"log_message_timestamp_difference_max_ms,omitempty"` // +kubebuilder:validation:Enum="CreateTime";"LogAppendTime" - // Define whether the timestamp in the message is message create time or log append time. + // Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime) LogMessageTimestampType *string `groups:"create,update" json:"log_message_timestamp_type,omitempty"` - // Should pre allocate file when create new segment? + // Should pre allocate file when create new segment? (Default: false) LogPreallocate *bool `groups:"create,update" json:"log_preallocate,omitempty"` // +kubebuilder:validation:Minimum=-1 - // The maximum size of the log before deleting messages + // The maximum size of the log before deleting messages (Default: -1) LogRetentionBytes *int `groups:"create,update" json:"log_retention_bytes,omitempty"` // +kubebuilder:validation:Minimum=-1 // +kubebuilder:validation:Maximum=2147483647 - // The number of hours to keep a log file before deleting it + // The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)) LogRetentionHours *int `groups:"create,update" json:"log_retention_hours,omitempty"` // +kubebuilder:validation:Minimum=-1 - // The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + // The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies) LogRetentionMs *int `groups:"create,update" json:"log_retention_ms,omitempty"` // +kubebuilder:validation:Minimum=0 - // The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used + // The maximum jitter to subtract from logRollTimeMillis (in milliseconds). 
If not set, the value in log.roll.jitter.hours is used (Default: null) LogRollJitterMs *int `groups:"create,update" json:"log_roll_jitter_ms,omitempty"` // +kubebuilder:validation:Minimum=1 - // The maximum time before a new log segment is rolled out (in milliseconds). + // The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)) LogRollMs *int `groups:"create,update" json:"log_roll_ms,omitempty"` // +kubebuilder:validation:Minimum=10485760 // +kubebuilder:validation:Maximum=1073741824 - // The maximum size of a single log file + // The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)) LogSegmentBytes *int `groups:"create,update" json:"log_segment_bytes,omitempty"` // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=3600000 - // The amount of time to wait before deleting a file from the filesystem + // The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)) LogSegmentDeleteDelayMs *int `groups:"create,update" json:"log_segment_delete_delay_ms,omitempty"` // +kubebuilder:validation:Minimum=256 // +kubebuilder:validation:Maximum=2147483647 - // The maximum number of connections allowed from each ip address (defaults to 2147483647). + // The maximum number of connections allowed from each ip address (Default: 2147483647). MaxConnectionsPerIp *int `groups:"create,update" json:"max_connections_per_ip,omitempty"` // +kubebuilder:validation:Minimum=1000 // +kubebuilder:validation:Maximum=10000 - // The maximum number of incremental fetch sessions that the broker will maintain. + // The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000) MaxIncrementalFetchSessionCacheSlots *int `groups:"create,update" json:"max_incremental_fetch_session_cache_slots,omitempty"` // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=100001200 - // The maximum size of message that the server can receive. + // The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)) MessageMaxBytes *int `groups:"create,update" json:"message_max_bytes,omitempty"` // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=7 - // When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. + // When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1) MinInsyncReplicas *int `groups:"create,update" json:"min_insync_replicas,omitempty"` // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=1000 - // Number of partitions for autocreated topics + // Number of partitions for auto-created topics (Default: 1) NumPartitions *int `groups:"create,update" json:"num_partitions,omitempty"` // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=2147483647 - // Log retention window in minutes for offsets topic + // Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)) OffsetsRetentionMinutes *int `groups:"create,update" json:"offsets_retention_minutes,omitempty"` // +kubebuilder:validation:Minimum=10 // +kubebuilder:validation:Maximum=10000 - // The purge interval (in number of requests) of the producer request purgatory(defaults to 1000). 
+ // The purge interval (in number of requests) of the producer request purgatory (Default: 1000). ProducerPurgatoryPurgeIntervalRequests *int `groups:"create,update" json:"producer_purgatory_purge_interval_requests,omitempty"` // +kubebuilder:validation:Minimum=1048576 // +kubebuilder:validation:Maximum=104857600 - // The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. + // The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)) ReplicaFetchMaxBytes *int `groups:"create,update" json:"replica_fetch_max_bytes,omitempty"` // +kubebuilder:validation:Minimum=10485760 // +kubebuilder:validation:Maximum=1048576000 - // Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. + // Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)) ReplicaFetchResponseMaxBytes *int `groups:"create,update" json:"replica_fetch_response_max_bytes,omitempty"` // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:Pattern=`^[^\r\n]*$` - // The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. + // The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null) SaslOauthbearerExpectedAudience *string `groups:"create,update" json:"sasl_oauthbearer_expected_audience,omitempty"` // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:Pattern=`^[^\r\n]*$` - // Optional setting for the broker to use to verify that the JWT was created by the expected issuer. + // Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null) SaslOauthbearerExpectedIssuer *string `groups:"create,update" json:"sasl_oauthbearer_expected_issuer,omitempty"` // +kubebuilder:validation:MaxLength=2048 - // OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. + // OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null) SaslOauthbearerJwksEndpointUrl *string `groups:"create,update" json:"sasl_oauthbearer_jwks_endpoint_url,omitempty"` // +kubebuilder:validation:MaxLength=128 - // +kubebuilder:validation:Pattern=`^[^\r\n]*$` - // Name of the scope from which to extract the subject claim from the JWT. Defaults to sub. + // +kubebuilder:validation:Pattern=`^[^\r\n]*\S[^\r\n]*$` + // Name of the scope from which to extract the subject claim from the JWT. (Default: sub) SaslOauthbearerSubClaimName *string `groups:"create,update" json:"sasl_oauthbearer_sub_claim_name,omitempty"` // +kubebuilder:validation:Minimum=10485760 // +kubebuilder:validation:Maximum=209715200 - // The maximum number of bytes in a socket request (defaults to 104857600). + // The maximum number of bytes in a socket request (Default: 104857600 bytes). SocketRequestMaxBytes *int `groups:"create,update" json:"socket_request_max_bytes,omitempty"` - // Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition + // Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false) TransactionPartitionVerificationEnable *bool `groups:"create,update" json:"transaction_partition_verification_enable,omitempty"` // +kubebuilder:validation:Minimum=600000 // +kubebuilder:validation:Maximum=3600000 - // The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)). + // The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). TransactionRemoveExpiredTransactionCleanupIntervalMs *int `groups:"create,update" json:"transaction_remove_expired_transaction_cleanup_interval_ms,omitempty"` // +kubebuilder:validation:Minimum=1048576 // +kubebuilder:validation:Maximum=2147483647 - // The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)). + // The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). TransactionStateLogSegmentBytes *int `groups:"create,update" json:"transaction_state_log_segment_bytes,omitempty"` } @@ -509,6 +515,9 @@ type KafkaUserConfig struct { // Serve the web frontend using a custom CNAME pointing to the Aiven DNS name CustomDomain *string `groups:"create,update" json:"custom_domain,omitempty"` + // Enable follower fetching + FollowerFetching *FollowerFetching `groups:"create,update" json:"follower_fetching,omitempty"` + // +kubebuilder:validation:MaxItems=1024 // Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' IpFilter []*IpFilter `groups:"create,update" json:"ip_filter,omitempty"` diff --git a/api/v1alpha1/userconfig/service/kafka/zz_generated.deepcopy.go b/api/v1alpha1/userconfig/service/kafka/zz_generated.deepcopy.go index 6e1772d4..616118fa 100644 --- a/api/v1alpha1/userconfig/service/kafka/zz_generated.deepcopy.go +++ b/api/v1alpha1/userconfig/service/kafka/zz_generated.deepcopy.go @@ -31,6 +31,26 @@ func (in *Aws) DeepCopy() *Aws { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FollowerFetching) DeepCopyInto(out *FollowerFetching) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FollowerFetching.
+func (in *FollowerFetching) DeepCopy() *FollowerFetching { + if in == nil { + return nil + } + out := new(FollowerFetching) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IpFilter) DeepCopyInto(out *IpFilter) { *out = *in @@ -524,6 +544,11 @@ func (in *KafkaUserConfig) DeepCopyInto(out *KafkaUserConfig) { *out = new(string) **out = **in } + if in.FollowerFetching != nil { + in, out := &in.FollowerFetching, &out.FollowerFetching + *out = new(FollowerFetching) + (*in).DeepCopyInto(*out) + } if in.IpFilter != nil { in, out := &in.IpFilter, &out.IpFilter *out = make([]*IpFilter, len(*in)) diff --git a/api/v1alpha1/userconfig/service/mysql/mysql.go b/api/v1alpha1/userconfig/service/mysql/mysql.go index 217eb4e5..3f16578b 100644 --- a/api/v1alpha1/userconfig/service/mysql/mysql.go +++ b/api/v1alpha1/userconfig/service/mysql/mysql.go @@ -28,6 +28,10 @@ type Migration struct { // Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment) IgnoreDbs *string `groups:"create,update" json:"ignore_dbs,omitempty"` + // +kubebuilder:validation:MaxLength=2048 + // Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment) + IgnoreRoles *string `groups:"create,update" json:"ignore_roles,omitempty"` + // +kubebuilder:validation:Enum="dump";"replication" // The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types) Method *string `groups:"create,update" json:"method,omitempty"` diff --git a/api/v1alpha1/userconfig/service/mysql/zz_generated.deepcopy.go b/api/v1alpha1/userconfig/service/mysql/zz_generated.deepcopy.go index c3ddd6f3..3d7967aa 100644 --- a/api/v1alpha1/userconfig/service/mysql/zz_generated.deepcopy.go +++ b/api/v1alpha1/userconfig/service/mysql/zz_generated.deepcopy.go @@ -39,6 +39,11 @@ func (in *Migration) DeepCopyInto(out *Migration) { *out = new(string) **out = **in } + if in.IgnoreRoles != nil { + in, out := &in.IgnoreRoles, &out.IgnoreRoles + *out = new(string) + **out = **in + } if in.Method != nil { in, out := &in.Method, &out.Method *out = new(string) diff --git a/api/v1alpha1/userconfig/service/pg/pg.go b/api/v1alpha1/userconfig/service/pg/pg.go index 47e2931f..497c56cf 100644 --- a/api/v1alpha1/userconfig/service/pg/pg.go +++ b/api/v1alpha1/userconfig/service/pg/pg.go @@ -28,6 +28,10 @@ type Migration struct { // Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment) IgnoreDbs *string `groups:"create,update" json:"ignore_dbs,omitempty"` + // +kubebuilder:validation:MaxLength=2048 + // Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment) + IgnoreRoles *string `groups:"create,update" json:"ignore_roles,omitempty"` + // +kubebuilder:validation:Enum="dump";"replication" // The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types) Method *string `groups:"create,update" json:"method,omitempty"` @@ -397,6 +401,11 @@ type Pgbouncer struct { // List of parameters to ignore when given in startup packet IgnoreStartupParameters []string `groups:"create,update" json:"ignore_startup_parameters,omitempty"` + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=3000 + 
// PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. + MaxPreparedStatements *int `groups:"create,update" json:"max_prepared_statements,omitempty"` + // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=10000 // Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. diff --git a/api/v1alpha1/userconfig/service/pg/zz_generated.deepcopy.go b/api/v1alpha1/userconfig/service/pg/zz_generated.deepcopy.go index 0a4df1fc..d0712bff 100644 --- a/api/v1alpha1/userconfig/service/pg/zz_generated.deepcopy.go +++ b/api/v1alpha1/userconfig/service/pg/zz_generated.deepcopy.go @@ -39,6 +39,11 @@ func (in *Migration) DeepCopyInto(out *Migration) { *out = new(string) **out = **in } + if in.IgnoreRoles != nil { + in, out := &in.IgnoreRoles, &out.IgnoreRoles + *out = new(string) + **out = **in + } if in.Method != nil { in, out := &in.Method, &out.Method *out = new(string) @@ -655,6 +660,11 @@ func (in *Pgbouncer) DeepCopyInto(out *Pgbouncer) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MaxPreparedStatements != nil { + in, out := &in.MaxPreparedStatements, &out.MaxPreparedStatements + *out = new(int) + **out = **in + } if in.MinPoolSize != nil { in, out := &in.MinPoolSize, &out.MinPoolSize *out = new(int) diff --git a/api/v1alpha1/userconfig/service/redis/redis.go b/api/v1alpha1/userconfig/service/redis/redis.go index eef58878..026742cf 100644 --- a/api/v1alpha1/userconfig/service/redis/redis.go +++ b/api/v1alpha1/userconfig/service/redis/redis.go @@ -28,6 +28,10 @@ type Migration struct { // Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment) IgnoreDbs *string `groups:"create,update" json:"ignore_dbs,omitempty"` + // +kubebuilder:validation:MaxLength=2048 + // Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment) + IgnoreRoles *string `groups:"create,update" json:"ignore_roles,omitempty"` + // +kubebuilder:validation:Enum="dump";"replication" // The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types) Method *string `groups:"create,update" json:"method,omitempty"` diff --git a/api/v1alpha1/userconfig/service/redis/zz_generated.deepcopy.go b/api/v1alpha1/userconfig/service/redis/zz_generated.deepcopy.go index a92a3949..106c62e9 100644 --- a/api/v1alpha1/userconfig/service/redis/zz_generated.deepcopy.go +++ b/api/v1alpha1/userconfig/service/redis/zz_generated.deepcopy.go @@ -39,6 +39,11 @@ func (in *Migration) DeepCopyInto(out *Migration) { *out = new(string) **out = **in } + if in.IgnoreRoles != nil { + in, out := &in.IgnoreRoles, &out.IgnoreRoles + *out = new(string) + **out = **in + } if in.Method != nil { in, out := &in.Method, &out.Method *out = new(string) diff --git a/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml b/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml index 48ecc53b..cd8b3c25 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml @@ -246,6 +246,13 @@ spec: to the 
Aiven DNS name maxLength: 255 type: string + follower_fetching: + description: Enable follower fetching + properties: + enabled: + description: Whether to enable the follower fetching functionality + type: boolean + type: object ip_filter: description: Allow incoming connections from CIDR address block, @@ -272,16 +279,16 @@ spec: description: Kafka broker configuration values properties: auto_create_topics_enable: - description: Enable auto creation of topics + description: "Enable auto-creation of topics. (Default: true)" type: boolean compression_type: description: - Specify the final compression type for a given + "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression - codec set by the producer. + codec set by the producer. (Default: producer)" enum: - gzip - snappy @@ -294,76 +301,83 @@ spec: description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer - than this." + than this. (Default: 600000 ms (10 minutes))" maximum: 3600000 minimum: 1000 type: integer default_replication_factor: - description: Replication factor for autocreated topics + description: + "Replication factor for auto-created topics (Default: + 3)" maximum: 10 minimum: 1 type: integer group_initial_rebalance_delay_ms: description: - The amount of time, in milliseconds, the group + "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + (Default: 3000 ms (3 seconds))" maximum: 300000 minimum: 0 type: integer group_max_session_timeout_ms: description: - The maximum allowed session timeout for registered + "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time - to detect failures. + to detect failures. (Default: 1800000 ms (30 minutes))" maximum: 1800000 minimum: 0 type: integer group_min_session_timeout_ms: description: - The minimum allowed session timeout for registered + "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time - to detect failures. + to detect failures. (Default: 6000 ms (6 seconds))" maximum: 60000 minimum: 0 type: integer log_cleaner_delete_retention_ms: - description: How long are delete records retained? + description: + "How long are delete records retained? (Default: + 86400000 (1 day))" maximum: 315569260000 minimum: 0 type: integer log_cleaner_max_compaction_lag_ms: description: - The maximum amount of time message will remain - uncompacted. Only applicable for logs that are being compacted + "The maximum amount of time message will remain + uncompacted. Only applicable for logs that are being compacted. + (Default: 9223372036854775807 ms (Long.MAX_VALUE))" minimum: 30000 type: integer log_cleaner_min_cleanable_ratio: description: - Controls log compactor frequency. Larger value + "Controls log compactor frequency.
Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very - high value for this option. + high value for this option. (Default: 0.5)" maximum: 0.9 minimum: 0.2 type: number log_cleaner_min_compaction_lag_ms: description: - The minimum time a message will remain uncompacted + "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + (Default: 0 ms)" minimum: 0 type: integer log_cleanup_policy: description: - The default cleanup policy for segments beyond - the retention window + "The default cleanup policy for segments beyond + the retention window (Default: delete)" enum: - delete - compact @@ -371,238 +385,257 @@ spec: type: string log_flush_interval_messages: description: - The number of messages accumulated on a log partition - before messages are flushed to disk + "The number of messages accumulated on a log + partition before messages are flushed to disk (Default: + 9223372036854775807 (Long.MAX_VALUE))" minimum: 1 type: integer log_flush_interval_ms: description: - The maximum time in ms that a message in any - topic is kept in memory before flushed to disk. If not set, - the value in log.flush.scheduler.interval.ms is used + "The maximum time in ms that a message in any + topic is kept in memory (page-cache) before flushed to disk. + If not set, the value in log.flush.scheduler.interval.ms + is used (Default: null)" minimum: 0 type: integer log_index_interval_bytes: description: - The interval with which Kafka adds an entry to - the offset index + "The interval with which Kafka adds an entry + to the offset index (Default: 4096 bytes (4 kibibytes))" maximum: 104857600 minimum: 0 type: integer log_index_size_max_bytes: - description: The maximum size in bytes of the offset index + description: + "The maximum size in bytes of the offset index + (Default: 10485760 (10 mebibytes))" maximum: 104857600 minimum: 1048576 type: integer log_local_retention_bytes: description: - The maximum size of local log segments that can - grow for a partition before it gets eligible for deletion. + "The maximum size of local log segments that + can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal - to log.retention.bytes value. + to log.retention.bytes value. (Default: -2)" minimum: -2 type: integer log_local_retention_ms: description: - The number of milliseconds to keep the local + "The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms - value. + value. (Default: -2)" minimum: -2 type: integer log_message_downconversion_enable: description: - This configuration controls whether down-conversion + "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. 
+ (Default: true)" type: boolean log_message_timestamp_difference_max_ms: description: - The maximum difference allowed between the timestamp + "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified - in the message + in the message (Default: 9223372036854775807 (Long.MAX_VALUE))" minimum: 0 type: integer log_message_timestamp_type: description: - Define whether the timestamp in the message is - message create time or log append time. + "Define whether the timestamp in the message + is message create time or log append time. (Default: CreateTime)" enum: - CreateTime - LogAppendTime type: string log_preallocate: - description: Should pre allocate file when create new segment? + description: + "Should pre allocate file when create new segment? + (Default: false)" type: boolean log_retention_bytes: - description: The maximum size of the log before deleting messages + description: + "The maximum size of the log before deleting + messages (Default: -1)" minimum: -1 type: integer log_retention_hours: description: - The number of hours to keep a log file before - deleting it + "The number of hours to keep a log file before + deleting it (Default: 168 hours (1 week))" maximum: 2147483647 minimum: -1 type: integer log_retention_ms: description: - The number of milliseconds to keep a log file + "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time - limit is applied. + limit is applied. (Default: null, log.retention.hours applies)" minimum: -1 type: integer log_roll_jitter_ms: description: - The maximum jitter to subtract from logRollTimeMillis + "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours - is used + is used (Default: null)" minimum: 0 type: integer log_roll_ms: description: - The maximum time before a new log segment is - rolled out (in milliseconds). + "The maximum time before a new log segment is + rolled out (in milliseconds). (Default: null, log.roll.hours + applies (Default: 168, 7 days))" minimum: 1 type: integer log_segment_bytes: - description: The maximum size of a single log file + description: + "The maximum size of a single log file (Default: + 1073741824 bytes (1 gibibyte))" maximum: 1073741824 minimum: 10485760 type: integer log_segment_delete_delay_ms: description: - The amount of time to wait before deleting a - file from the filesystem + "The amount of time to wait before deleting a + file from the filesystem (Default: 60000 ms (1 minute))" maximum: 3600000 minimum: 0 type: integer max_connections_per_ip: description: - The maximum number of connections allowed from - each ip address (defaults to 2147483647). + "The maximum number of connections allowed from + each ip address (Default: 2147483647)." maximum: 2147483647 minimum: 256 type: integer max_incremental_fetch_session_cache_slots: description: - The maximum number of incremental fetch sessions - that the broker will maintain. + "The maximum number of incremental fetch sessions + that the broker will maintain. (Default: 1000)" maximum: 10000 minimum: 1000 type: integer message_max_bytes: description: - The maximum size of message that the server can - receive. + "The maximum size of message that the server + can receive. 
(Default: 1048588 bytes (1 mebibyte + 12 bytes))" maximum: 100001200 minimum: 0 type: integer min_insync_replicas: description: - When a producer sets acks to 'all' (or '-1'), + "When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered - successful. + successful. (Default: 1)" maximum: 7 minimum: 1 type: integer num_partitions: - description: Number of partitions for autocreated topics + description: + "Number of partitions for auto-created topics + (Default: 1)" maximum: 1000 minimum: 1 type: integer offsets_retention_minutes: - description: Log retention window in minutes for offsets topic + description: + "Log retention window in minutes for offsets + topic (Default: 10080 minutes (7 days))" maximum: 2147483647 minimum: 1 type: integer producer_purgatory_purge_interval_requests: description: - The purge interval (in number of requests) of - the producer request purgatory(defaults to 1000). + "The purge interval (in number of requests) of + the producer request purgatory (Default: 1000)." maximum: 10000 minimum: 10 type: integer replica_fetch_max_bytes: description: - The number of bytes of messages to attempt to - fetch for each partition (defaults to 1048576). This is - not an absolute maximum, if the first record batch in the - first non-empty partition of the fetch is larger than this - value, the record batch will still be returned to ensure - that progress can be made. + "The number of bytes of messages to attempt to + fetch for each partition. This is not an absolute maximum, + if the first record batch in the first non-empty partition + of the fetch is larger than this value, the record batch + will still be returned to ensure that progress can be made. + (Default: 1048576 bytes (1 mebibyte))" maximum: 104857600 minimum: 1048576 type: integer replica_fetch_response_max_bytes: description: - Maximum bytes expected for the entire fetch response - (defaults to 10485760). Records are fetched in batches, - and if the first record batch in the first non-empty partition - of the fetch is larger than this value, the record batch - will still be returned to ensure that progress can be made. - As such, this is not an absolute maximum. + "Maximum bytes expected for the entire fetch + response. Records are fetched in batches, and if the first + record batch in the first non-empty partition of the fetch + is larger than this value, the record batch will still be + returned to ensure that progress can be made. As such, this + is not an absolute maximum. (Default: 10485760 bytes (10 + mebibytes))" maximum: 1048576000 minimum: 10485760 type: integer sasl_oauthbearer_expected_audience: description: - The (optional) comma-delimited setting for the + "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one - of the expected audiences. + of the expected audiences. (Default: null)" maxLength: 128 pattern: ^[^\r\n]*$ type: string sasl_oauthbearer_expected_issuer: description: - Optional setting for the broker to use to verify - that the JWT was created by the expected issuer. + "Optional setting for the broker to use to verify + that the JWT was created by the expected issuer. (Default: + null)" maxLength: 128 pattern: ^[^\r\n]*$ type: string sasl_oauthbearer_jwks_endpoint_url: description: - OIDC JWKS endpoint URL. By setting this the SASL - SSL OAuth2/OIDC authentication is enabled. See also other - options for SASL OAuth2/OIDC. + "OIDC JWKS endpoint URL. By setting this the + SASL SSL OAuth2/OIDC authentication is enabled. See also + other options for SASL OAuth2/OIDC. (Default: null)" maxLength: 2048 type: string sasl_oauthbearer_sub_claim_name: description: - Name of the scope from which to extract the subject - claim from the JWT. Defaults to sub. + "Name of the scope from which to extract the + subject claim from the JWT. (Default: sub)" maxLength: 128 - pattern: ^[^\r\n]*$ + pattern: ^[^\r\n]*\S[^\r\n]*$ type: string socket_request_max_bytes: description: - The maximum number of bytes in a socket request - (defaults to 104857600). + "The maximum number of bytes in a socket request + (Default: 104857600 bytes)." maximum: 209715200 minimum: 10485760 type: integer transaction_partition_verification_enable: description: - Enable verification that checks that the partition + "Enable verification that checks that the partition has been added to the transaction before writing transactional - records to the partition + records to the partition. (Default: false)" type: boolean transaction_remove_expired_transaction_cleanup_interval_ms: description: - The interval at which to remove transactions + "The interval at which to remove transactions that have expired due to transactional.id.expiration.ms - passing (defaults to 3600000 (1 hour)). + passing (Default: 3600000 ms (1 hour))." maximum: 3600000 minimum: 600000 type: integer transaction_state_log_segment_bytes: description: - The transaction topic segment bytes should be + "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log - compaction and cache loads (defaults to 104857600 (100 mebibytes)). + compaction and cache loads (Default: 104857600 bytes (100 + mebibytes))." maximum: 2147483647 minimum: 1048576 type: integer diff --git a/charts/aiven-operator-crds/templates/aiven.io_mysqls.yaml b/charts/aiven-operator-crds/templates/aiven.io_mysqls.yaml index d786c9ac..b7eefee8 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_mysqls.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_mysqls.yaml @@ -318,6 +318,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported diff --git a/charts/aiven-operator-crds/templates/aiven.io_postgresqls.yaml b/charts/aiven-operator-crds/templates/aiven.io_postgresqls.yaml index 3310fd87..dbec0116 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_postgresqls.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_postgresqls.yaml @@ -314,6 +314,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported @@ -909,6 +916,17 @@ spec: type: string maxItems: 32 type: array + max_prepared_statements: + description: + PgBouncer tracks protocol-level named prepared + statements related commands sent by the client in transaction + and statement pooling modes when max_prepared_statements + is set to a non-zero value. Setting it to 0 disables prepared + statements. max_prepared_statements defaults to 100, and + its maximum is 3000.
+ maximum: 3000 + minimum: 0 + type: integer min_pool_size: description: Add more server connections to pool if below diff --git a/charts/aiven-operator-crds/templates/aiven.io_redis.yaml b/charts/aiven-operator-crds/templates/aiven.io_redis.yaml index 7692367d..db2c278e 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_redis.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_redis.yaml @@ -272,6 +272,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported diff --git a/charts/aiven-operator-crds/templates/aiven.io_serviceintegrations.yaml b/charts/aiven-operator-crds/templates/aiven.io_serviceintegrations.yaml index 3ebab27b..825f7270 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_serviceintegrations.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_serviceintegrations.yaml @@ -181,6 +181,14 @@ spec: maximum: 1000000000 minimum: 0 type: integer + poll_max_timeout_ms: + description: + Timeout in milliseconds for a single poll from + Kafka. Takes the value of the stream_flush_interval_ms + server setting by default (500ms). + maximum: 30000 + minimum: 0 + type: integer skip_broken_messages: description: Skip at least this number of broken messages @@ -188,6 +196,11 @@ spec: maximum: 1000000000 minimum: 0 type: integer + thread_per_consumer: + description: + Provide an independent thread for each consumer. + All consumers run in the same thread by default. + type: boolean topics: description: Kafka topics items: diff --git a/config/crd/bases/aiven.io_kafkas.yaml b/config/crd/bases/aiven.io_kafkas.yaml index 48ecc53b..cd8b3c25 100644 --- a/config/crd/bases/aiven.io_kafkas.yaml +++ b/config/crd/bases/aiven.io_kafkas.yaml @@ -246,6 +246,13 @@ spec: to the Aiven DNS name maxLength: 255 type: string + follower_fetching: + description: Enable follower fetching + properties: + enabled: + description: Whether to enable the follower fetching functionality + type: boolean + type: object ip_filter: description: Allow incoming connections from CIDR address block, @@ -272,16 +279,16 @@ spec: description: Kafka broker configuration values properties: auto_create_topics_enable: - description: Enable auto creation of topics + description: "Enable auto-creation of topics. (Default: true)" type: boolean compression_type: description: - Specify the final compression type for a given + "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression - codec set by the producer. + codec set by the producer. (Default: producer)" enum: - gzip - snappy @@ -294,76 +301,83 @@ spec: description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer - than this." + than this. (Default: 600000 ms (10 minutes))" maximum: 3600000 minimum: 1000 type: integer default_replication_factor: - description: Replication factor for autocreated topics + description: + "Replication factor for auto-created topics (Default: + 3)" maximum: 10 minimum: 1 type: integer group_initial_rebalance_delay_ms: description: - The amount of time, in milliseconds, the group + "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + (Default: 3000 ms (3 seconds))" maximum: 300000 minimum: 0 type: integer group_max_session_timeout_ms: description: - The maximum allowed session timeout for registered + "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time - to detect failures. + to detect failures. (Default: 1800000 ms (30 minutes))" maximum: 1800000 minimum: 0 type: integer group_min_session_timeout_ms: description: - The minimum allowed session timeout for registered + "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time - to detect failures. + to detect failures. (Default: 6000 ms (6 seconds))" maximum: 60000 minimum: 0 type: integer log_cleaner_delete_retention_ms: - description: How long are delete records retained? + description: + "How long are delete records retained? (Default: + 86400000 (1 day))" maximum: 315569260000 minimum: 0 type: integer log_cleaner_max_compaction_lag_ms: description: - The maximum amount of time message will remain - uncompacted. Only applicable for logs that are being compacted + "The maximum amount of time message will remain + uncompacted. Only applicable for logs that are being compacted. + (Default: 9223372036854775807 ms (Long.MAX_VALUE))" minimum: 30000 type: integer log_cleaner_min_cleanable_ratio: description: - Controls log compactor frequency. Larger value + "Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very - high value for this option. + high value for this option. (Default: 0.5)" maximum: 0.9 minimum: 0.2 type: number log_cleaner_min_compaction_lag_ms: description: - The minimum time a message will remain uncompacted + "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
+ (Default: 0 ms)" minimum: 0 type: integer log_cleanup_policy: description: - The default cleanup policy for segments beyond - the retention window + "The default cleanup policy for segments beyond + the retention window (Default: delete)" enum: - delete - compact @@ -371,238 +385,257 @@ spec: type: string log_flush_interval_messages: description: - The number of messages accumulated on a log partition - before messages are flushed to disk + "The number of messages accumulated on a log + partition before messages are flushed to disk (Default: + 9223372036854775807 (Long.MAX_VALUE))" minimum: 1 type: integer log_flush_interval_ms: description: - The maximum time in ms that a message in any - topic is kept in memory before flushed to disk. If not set, - the value in log.flush.scheduler.interval.ms is used + "The maximum time in ms that a message in any + topic is kept in memory (page-cache) before flushed to disk. + If not set, the value in log.flush.scheduler.interval.ms + is used (Default: null)" minimum: 0 type: integer log_index_interval_bytes: description: - The interval with which Kafka adds an entry to - the offset index + "The interval with which Kafka adds an entry + to the offset index (Default: 4096 bytes (4 kibibytes))" maximum: 104857600 minimum: 0 type: integer log_index_size_max_bytes: - description: The maximum size in bytes of the offset index + description: + "The maximum size in bytes of the offset index + (Default: 10485760 (10 mebibytes))" maximum: 104857600 minimum: 1048576 type: integer log_local_retention_bytes: description: - The maximum size of local log segments that can - grow for a partition before it gets eligible for deletion. + "The maximum size of local log segments that + can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal - to log.retention.bytes value. + to log.retention.bytes value. (Default: -2)" minimum: -2 type: integer log_local_retention_ms: description: - The number of milliseconds to keep the local + "The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms - value. + value. (Default: -2)" minimum: -2 type: integer log_message_downconversion_enable: description: - This configuration controls whether down-conversion + "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + (Default: true)" type: boolean log_message_timestamp_difference_max_ms: description: - The maximum difference allowed between the timestamp + "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified - in the message + in the message (Default: 9223372036854775807 (Long.MAX_VALUE))" minimum: 0 type: integer log_message_timestamp_type: description: - Define whether the timestamp in the message is - message create time or log append time. + "Define whether the timestamp in the message + is message create time or log append time. (Default: CreateTime)" enum: - CreateTime - LogAppendTime type: string log_preallocate: - description: Should pre allocate file when create new segment? + description: + "Should pre allocate file when create new segment? 
+ (Default: false)" type: boolean log_retention_bytes: - description: The maximum size of the log before deleting messages + description: + "The maximum size of the log before deleting + messages (Default: -1)" minimum: -1 type: integer log_retention_hours: description: - The number of hours to keep a log file before - deleting it + "The number of hours to keep a log file before + deleting it (Default: 168 hours (1 week))" maximum: 2147483647 minimum: -1 type: integer log_retention_ms: description: - The number of milliseconds to keep a log file + "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time - limit is applied. + limit is applied. (Default: null, log.retention.hours applies)" minimum: -1 type: integer log_roll_jitter_ms: description: - The maximum jitter to subtract from logRollTimeMillis + "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours - is used + is used (Default: null)" minimum: 0 type: integer log_roll_ms: description: - The maximum time before a new log segment is - rolled out (in milliseconds). + "The maximum time before a new log segment is + rolled out (in milliseconds). (Default: null, log.roll.hours + applies (Default: 168, 7 days))" minimum: 1 type: integer log_segment_bytes: - description: The maximum size of a single log file + description: + "The maximum size of a single log file (Default: + 1073741824 bytes (1 gibibyte))" maximum: 1073741824 minimum: 10485760 type: integer log_segment_delete_delay_ms: description: - The amount of time to wait before deleting a - file from the filesystem + "The amount of time to wait before deleting a + file from the filesystem (Default: 60000 ms (1 minute))" maximum: 3600000 minimum: 0 type: integer max_connections_per_ip: description: - The maximum number of connections allowed from - each ip address (defaults to 2147483647). + "The maximum number of connections allowed from + each ip address (Default: 2147483647)." maximum: 2147483647 minimum: 256 type: integer max_incremental_fetch_session_cache_slots: description: - The maximum number of incremental fetch sessions - that the broker will maintain. + "The maximum number of incremental fetch sessions + that the broker will maintain. (Default: 1000)" maximum: 10000 minimum: 1000 type: integer message_max_bytes: description: - The maximum size of message that the server can - receive. + "The maximum size of message that the server + can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes))" maximum: 100001200 minimum: 0 type: integer min_insync_replicas: description: - When a producer sets acks to 'all' (or '-1'), + "When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered - successful. + successful. 
(Default: 1)" maximum: 7 minimum: 1 type: integer num_partitions: - description: Number of partitions for autocreated topics + description: + "Number of partitions for auto-created topics + (Default: 1)" maximum: 1000 minimum: 1 type: integer offsets_retention_minutes: - description: Log retention window in minutes for offsets topic + description: + "Log retention window in minutes for offsets + topic (Default: 10080 minutes (7 days))" maximum: 2147483647 minimum: 1 type: integer producer_purgatory_purge_interval_requests: description: - The purge interval (in number of requests) of - the producer request purgatory(defaults to 1000). + "The purge interval (in number of requests) of + the producer request purgatory (Default: 1000)." maximum: 10000 minimum: 10 type: integer replica_fetch_max_bytes: description: - The number of bytes of messages to attempt to - fetch for each partition (defaults to 1048576). This is - not an absolute maximum, if the first record batch in the - first non-empty partition of the fetch is larger than this - value, the record batch will still be returned to ensure - that progress can be made. + "The number of bytes of messages to attempt to + fetch for each partition. This is not an absolute maximum, + if the first record batch in the first non-empty partition + of the fetch is larger than this value, the record batch + will still be returned to ensure that progress can be made. + (Default: 1048576 bytes (1 mebibyte))" maximum: 104857600 minimum: 1048576 type: integer replica_fetch_response_max_bytes: description: - Maximum bytes expected for the entire fetch response - (defaults to 10485760). Records are fetched in batches, - and if the first record batch in the first non-empty partition - of the fetch is larger than this value, the record batch - will still be returned to ensure that progress can be made. - As such, this is not an absolute maximum. + "Maximum bytes expected for the entire fetch + response. Records are fetched in batches, and if the first + record batch in the first non-empty partition of the fetch + is larger than this value, the record batch will still be + returned to ensure that progress can be made. As such, this + is not an absolute maximum. (Default: 10485760 bytes (10 + mebibytes))" maximum: 1048576000 minimum: 10485760 type: integer sasl_oauthbearer_expected_audience: description: - The (optional) comma-delimited setting for the + "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one - of the expected audiences. + of the expected audiences. (Default: null)" maxLength: 128 pattern: ^[^\r\n]*$ type: string sasl_oauthbearer_expected_issuer: description: - Optional setting for the broker to use to verify - that the JWT was created by the expected issuer. + "Optional setting for the broker to use to verify + that the JWT was created by the expected issuer. (Default: + null)" maxLength: 128 pattern: ^[^\r\n]*$ type: string sasl_oauthbearer_jwks_endpoint_url: description: - OIDC JWKS endpoint URL. By setting this the SASL - SSL OAuth2/OIDC authentication is enabled. See also other - options for SASL OAuth2/OIDC. + "OIDC JWKS endpoint URL. By setting this the + SASL SSL OAuth2/OIDC authentication is enabled. See also + other options for SASL OAuth2/OIDC. (Default: null)" maxLength: 2048 type: string sasl_oauthbearer_sub_claim_name: description: - Name of the scope from which to extract the subject - claim from the JWT. Defaults to sub.
+ "Name of the scope from which to extract the + subject claim from the JWT. (Default: sub)" maxLength: 128 - pattern: ^[^\r\n]*$ + pattern: ^[^\r\n]*\S[^\r\n]*$ type: string socket_request_max_bytes: description: - The maximum number of bytes in a socket request - (defaults to 104857600). + "The maximum number of bytes in a socket request + (Default: 104857600 bytes)." maximum: 209715200 minimum: 10485760 type: integer transaction_partition_verification_enable: description: - Enable verification that checks that the partition + "Enable verification that checks that the partition has been added to the transaction before writing transactional - records to the partition + records to the partition. (Default: false)" type: boolean transaction_remove_expired_transaction_cleanup_interval_ms: description: - The interval at which to remove transactions + "The interval at which to remove transactions that have expired due to transactional.id.expiration.ms - passing (defaults to 3600000 (1 hour)). + passing (Default: 3600000 ms (1 hour))." maximum: 3600000 minimum: 600000 type: integer transaction_state_log_segment_bytes: description: - The transaction topic segment bytes should be + "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log - compaction and cache loads (defaults to 104857600 (100 mebibytes)). + compaction and cache loads (Default: 104857600 bytes (100 + mebibytes))." maximum: 2147483647 minimum: 1048576 type: integer diff --git a/config/crd/bases/aiven.io_mysqls.yaml b/config/crd/bases/aiven.io_mysqls.yaml index d786c9ac..b7eefee8 100644 --- a/config/crd/bases/aiven.io_mysqls.yaml +++ b/config/crd/bases/aiven.io_mysqls.yaml @@ -318,6 +318,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported diff --git a/config/crd/bases/aiven.io_postgresqls.yaml b/config/crd/bases/aiven.io_postgresqls.yaml index 3310fd87..dbec0116 100644 --- a/config/crd/bases/aiven.io_postgresqls.yaml +++ b/config/crd/bases/aiven.io_postgresqls.yaml @@ -314,6 +314,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported @@ -909,6 +916,17 @@ spec: type: string maxItems: 32 type: array + max_prepared_statements: + description: + PgBouncer tracks protocol-level named prepared + statements related commands sent by the client in transaction + and statement pooling modes when max_prepared_statements + is set to a non-zero value. Setting it to 0 disables prepared + statements. max_prepared_statements defaults to 100, and + its maximum is 3000.
+ maximum: 3000 + minimum: 0 + type: integer min_pool_size: description: Add more server connections to pool if below diff --git a/config/crd/bases/aiven.io_redis.yaml b/config/crd/bases/aiven.io_redis.yaml index 7692367d..db2c278e 100644 --- a/config/crd/bases/aiven.io_redis.yaml +++ b/config/crd/bases/aiven.io_redis.yaml @@ -272,6 +272,13 @@ spec: only at the moment) maxLength: 2048 type: string + ignore_roles: + description: + Comma-separated list of database roles, which + should be ignored during migration (supported by PostgreSQL + only at the moment) + maxLength: 2048 + type: string method: description: The migration method to be used (currently supported diff --git a/config/crd/bases/aiven.io_serviceintegrations.yaml b/config/crd/bases/aiven.io_serviceintegrations.yaml index 3ebab27b..825f7270 100644 --- a/config/crd/bases/aiven.io_serviceintegrations.yaml +++ b/config/crd/bases/aiven.io_serviceintegrations.yaml @@ -181,6 +181,14 @@ spec: maximum: 1000000000 minimum: 0 type: integer + poll_max_timeout_ms: + description: + Timeout in milliseconds for a single poll from + Kafka. Takes the value of the stream_flush_interval_ms + server setting by default (500ms). + maximum: 30000 + minimum: 0 + type: integer skip_broken_messages: description: Skip at least this number of broken messages @@ -188,6 +196,11 @@ spec: maximum: 1000000000 minimum: 0 type: integer + thread_per_consumer: + description: + Provide an independent thread for each consumer. + All consumers run in the same thread by default. + type: boolean topics: description: Kafka topics items: diff --git a/docs/docs/api-reference/kafka.md b/docs/docs/api-reference/kafka.md index 7dafa98c..583f9fcc 100644 --- a/docs/docs/api-reference/kafka.md +++ b/docs/docs/api-reference/kafka.md @@ -206,6 +206,7 @@ Kafka specific user configuration options. - [`additional_backup_regions`](#spec.userConfig.additional_backup_regions-property){: name='spec.userConfig.additional_backup_regions-property'} (array of strings, MaxItems: 1). Deprecated. Additional Cloud Regions for Backup Replication. - [`aiven_kafka_topic_messages`](#spec.userConfig.aiven_kafka_topic_messages-property){: name='spec.userConfig.aiven_kafka_topic_messages-property'} (boolean). Allow access to read Kafka topic messages in the Aiven Console and REST API. - [`custom_domain`](#spec.userConfig.custom_domain-property){: name='spec.userConfig.custom_domain-property'} (string, MaxLength: 255). Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. +- [`follower_fetching`](#spec.userConfig.follower_fetching-property){: name='spec.userConfig.follower_fetching-property'} (object). Enable follower fetching. See below for [nested schema](#spec.userConfig.follower_fetching). - [`ip_filter`](#spec.userConfig.ip_filter-property){: name='spec.userConfig.ip_filter-property'} (array of objects, MaxItems: 1024). Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`. See below for [nested schema](#spec.userConfig.ip_filter). - [`kafka`](#spec.userConfig.kafka-property){: name='spec.userConfig.kafka-property'} (object). Kafka broker configuration values. See below for [nested schema](#spec.userConfig.kafka). - [`kafka_authentication_methods`](#spec.userConfig.kafka_authentication_methods-property){: name='spec.userConfig.kafka_authentication_methods-property'} (object). Kafka authentication methods. See below for [nested schema](#spec.userConfig.kafka_authentication_methods). @@ -226,6 +227,16 @@ Kafka specific user configuration options. 
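As a sketch of how the new `follower_fetching` object documented above is consumed, a minimal `Kafka` manifest might look like this; the metadata, project, cloud, and plan values are illustrative assumptions, not part of this change:

```yaml
apiVersion: aiven.io/v1alpha1
kind: Kafka
metadata:
  name: example-kafka         # hypothetical resource name
spec:
  project: example-project    # placeholder Aiven project
  cloudName: google-europe-west1
  plan: business-4
  userConfig:
    # New object added in this release
    follower_fetching:
      enabled: true
```

Applying such a manifest with `kubectl apply` would surface the option exactly as described by the generated schema below.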
- [`static_ips`](#spec.userConfig.static_ips-property){: name='spec.userConfig.static_ips-property'} (boolean). Use static public IP addresses. - [`tiered_storage`](#spec.userConfig.tiered_storage-property){: name='spec.userConfig.tiered_storage-property'} (object). Tiered storage configuration. See below for [nested schema](#spec.userConfig.tiered_storage). +### follower_fetching {: #spec.userConfig.follower_fetching } + +_Appears on [`spec.userConfig`](#spec.userConfig)._ + +Enable follower fetching. + +**Required** + +- [`enabled`](#spec.userConfig.follower_fetching.enabled-property){: name='spec.userConfig.follower_fetching.enabled-property'} (boolean). Whether to enable the follower fetching functionality. + ### ip_filter {: #spec.userConfig.ip_filter } _Appears on [`spec.userConfig`](#spec.userConfig)._ @@ -248,52 +259,52 @@ Kafka broker configuration values. **Optional** -- [`auto_create_topics_enable`](#spec.userConfig.kafka.auto_create_topics_enable-property){: name='spec.userConfig.kafka.auto_create_topics_enable-property'} (boolean). Enable auto creation of topics. -- [`compression_type`](#spec.userConfig.kafka.compression_type-property){: name='spec.userConfig.kafka.compression_type-property'} (string, Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`). Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer. -- [`connections_max_idle_ms`](#spec.userConfig.kafka.connections_max_idle_ms-property){: name='spec.userConfig.kafka.connections_max_idle_ms-property'} (integer, Minimum: 1000, Maximum: 3600000). Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. -- [`default_replication_factor`](#spec.userConfig.kafka.default_replication_factor-property){: name='spec.userConfig.kafka.default_replication_factor-property'} (integer, Minimum: 1, Maximum: 10). Replication factor for autocreated topics. -- [`group_initial_rebalance_delay_ms`](#spec.userConfig.kafka.group_initial_rebalance_delay_ms-property){: name='spec.userConfig.kafka.group_initial_rebalance_delay_ms-property'} (integer, Minimum: 0, Maximum: 300000). The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. -- [`group_max_session_timeout_ms`](#spec.userConfig.kafka.group_max_session_timeout_ms-property){: name='spec.userConfig.kafka.group_max_session_timeout_ms-property'} (integer, Minimum: 0, Maximum: 1800000). The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. -- [`group_min_session_timeout_ms`](#spec.userConfig.kafka.group_min_session_timeout_ms-property){: name='spec.userConfig.kafka.group_min_session_timeout_ms-property'} (integer, Minimum: 0, Maximum: 60000). The minimum allowed session timeout for registered consumers. 
Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. -- [`log_cleaner_delete_retention_ms`](#spec.userConfig.kafka.log_cleaner_delete_retention_ms-property){: name='spec.userConfig.kafka.log_cleaner_delete_retention_ms-property'} (integer, Minimum: 0, Maximum: 315569260000). How long are delete records retained?. -- [`log_cleaner_max_compaction_lag_ms`](#spec.userConfig.kafka.log_cleaner_max_compaction_lag_ms-property){: name='spec.userConfig.kafka.log_cleaner_max_compaction_lag_ms-property'} (integer, Minimum: 30000). The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. -- [`log_cleaner_min_cleanable_ratio`](#spec.userConfig.kafka.log_cleaner_min_cleanable_ratio-property){: name='spec.userConfig.kafka.log_cleaner_min_cleanable_ratio-property'} (number, Minimum: 0.2, Maximum: 0.9). Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. -- [`log_cleaner_min_compaction_lag_ms`](#spec.userConfig.kafka.log_cleaner_min_compaction_lag_ms-property){: name='spec.userConfig.kafka.log_cleaner_min_compaction_lag_ms-property'} (integer, Minimum: 0). The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. -- [`log_cleanup_policy`](#spec.userConfig.kafka.log_cleanup_policy-property){: name='spec.userConfig.kafka.log_cleanup_policy-property'} (string, Enum: `delete`, `compact`, `compact,delete`). The default cleanup policy for segments beyond the retention window. -- [`log_flush_interval_messages`](#spec.userConfig.kafka.log_flush_interval_messages-property){: name='spec.userConfig.kafka.log_flush_interval_messages-property'} (integer, Minimum: 1). The number of messages accumulated on a log partition before messages are flushed to disk. -- [`log_flush_interval_ms`](#spec.userConfig.kafka.log_flush_interval_ms-property){: name='spec.userConfig.kafka.log_flush_interval_ms-property'} (integer, Minimum: 0). The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. -- [`log_index_interval_bytes`](#spec.userConfig.kafka.log_index_interval_bytes-property){: name='spec.userConfig.kafka.log_index_interval_bytes-property'} (integer, Minimum: 0, Maximum: 104857600). The interval with which Kafka adds an entry to the offset index. -- [`log_index_size_max_bytes`](#spec.userConfig.kafka.log_index_size_max_bytes-property){: name='spec.userConfig.kafka.log_index_size_max_bytes-property'} (integer, Minimum: 1048576, Maximum: 104857600). The maximum size in bytes of the offset index. -- [`log_local_retention_bytes`](#spec.userConfig.kafka.log_local_retention_bytes-property){: name='spec.userConfig.kafka.log_local_retention_bytes-property'} (integer, Minimum: -2). The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. -- [`log_local_retention_ms`](#spec.userConfig.kafka.log_local_retention_ms-property){: name='spec.userConfig.kafka.log_local_retention_ms-property'} (integer, Minimum: -2). 
The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. -- [`log_message_downconversion_enable`](#spec.userConfig.kafka.log_message_downconversion_enable-property){: name='spec.userConfig.kafka.log_message_downconversion_enable-property'} (boolean). This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. -- [`log_message_timestamp_difference_max_ms`](#spec.userConfig.kafka.log_message_timestamp_difference_max_ms-property){: name='spec.userConfig.kafka.log_message_timestamp_difference_max_ms-property'} (integer, Minimum: 0). The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. -- [`log_message_timestamp_type`](#spec.userConfig.kafka.log_message_timestamp_type-property){: name='spec.userConfig.kafka.log_message_timestamp_type-property'} (string, Enum: `CreateTime`, `LogAppendTime`). Define whether the timestamp in the message is message create time or log append time. -- [`log_preallocate`](#spec.userConfig.kafka.log_preallocate-property){: name='spec.userConfig.kafka.log_preallocate-property'} (boolean). Should pre allocate file when create new segment?. -- [`log_retention_bytes`](#spec.userConfig.kafka.log_retention_bytes-property){: name='spec.userConfig.kafka.log_retention_bytes-property'} (integer, Minimum: -1). The maximum size of the log before deleting messages. -- [`log_retention_hours`](#spec.userConfig.kafka.log_retention_hours-property){: name='spec.userConfig.kafka.log_retention_hours-property'} (integer, Minimum: -1, Maximum: 2147483647). The number of hours to keep a log file before deleting it. -- [`log_retention_ms`](#spec.userConfig.kafka.log_retention_ms-property){: name='spec.userConfig.kafka.log_retention_ms-property'} (integer, Minimum: -1). The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. -- [`log_roll_jitter_ms`](#spec.userConfig.kafka.log_roll_jitter_ms-property){: name='spec.userConfig.kafka.log_roll_jitter_ms-property'} (integer, Minimum: 0). The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. -- [`log_roll_ms`](#spec.userConfig.kafka.log_roll_ms-property){: name='spec.userConfig.kafka.log_roll_ms-property'} (integer, Minimum: 1). The maximum time before a new log segment is rolled out (in milliseconds). -- [`log_segment_bytes`](#spec.userConfig.kafka.log_segment_bytes-property){: name='spec.userConfig.kafka.log_segment_bytes-property'} (integer, Minimum: 10485760, Maximum: 1073741824). The maximum size of a single log file. -- [`log_segment_delete_delay_ms`](#spec.userConfig.kafka.log_segment_delete_delay_ms-property){: name='spec.userConfig.kafka.log_segment_delete_delay_ms-property'} (integer, Minimum: 0, Maximum: 3600000). The amount of time to wait before deleting a file from the filesystem. -- [`max_connections_per_ip`](#spec.userConfig.kafka.max_connections_per_ip-property){: name='spec.userConfig.kafka.max_connections_per_ip-property'} (integer, Minimum: 256, Maximum: 2147483647). The maximum number of connections allowed from each ip address (defaults to 2147483647). 
-- [`max_incremental_fetch_session_cache_slots`](#spec.userConfig.kafka.max_incremental_fetch_session_cache_slots-property){: name='spec.userConfig.kafka.max_incremental_fetch_session_cache_slots-property'} (integer, Minimum: 1000, Maximum: 10000). The maximum number of incremental fetch sessions that the broker will maintain. -- [`message_max_bytes`](#spec.userConfig.kafka.message_max_bytes-property){: name='spec.userConfig.kafka.message_max_bytes-property'} (integer, Minimum: 0, Maximum: 100001200). The maximum size of message that the server can receive. -- [`min_insync_replicas`](#spec.userConfig.kafka.min_insync_replicas-property){: name='spec.userConfig.kafka.min_insync_replicas-property'} (integer, Minimum: 1, Maximum: 7). When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. -- [`num_partitions`](#spec.userConfig.kafka.num_partitions-property){: name='spec.userConfig.kafka.num_partitions-property'} (integer, Minimum: 1, Maximum: 1000). Number of partitions for autocreated topics. -- [`offsets_retention_minutes`](#spec.userConfig.kafka.offsets_retention_minutes-property){: name='spec.userConfig.kafka.offsets_retention_minutes-property'} (integer, Minimum: 1, Maximum: 2147483647). Log retention window in minutes for offsets topic. -- [`producer_purgatory_purge_interval_requests`](#spec.userConfig.kafka.producer_purgatory_purge_interval_requests-property){: name='spec.userConfig.kafka.producer_purgatory_purge_interval_requests-property'} (integer, Minimum: 10, Maximum: 10000). The purge interval (in number of requests) of the producer request purgatory(defaults to 1000). -- [`replica_fetch_max_bytes`](#spec.userConfig.kafka.replica_fetch_max_bytes-property){: name='spec.userConfig.kafka.replica_fetch_max_bytes-property'} (integer, Minimum: 1048576, Maximum: 104857600). The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. -- [`replica_fetch_response_max_bytes`](#spec.userConfig.kafka.replica_fetch_response_max_bytes-property){: name='spec.userConfig.kafka.replica_fetch_response_max_bytes-property'} (integer, Minimum: 10485760, Maximum: 1048576000). Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. -- [`sasl_oauthbearer_expected_audience`](#spec.userConfig.kafka.sasl_oauthbearer_expected_audience-property){: name='spec.userConfig.kafka.sasl_oauthbearer_expected_audience-property'} (string, Pattern: `^[^\r\n]*$`, MaxLength: 128). The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. -- [`sasl_oauthbearer_expected_issuer`](#spec.userConfig.kafka.sasl_oauthbearer_expected_issuer-property){: name='spec.userConfig.kafka.sasl_oauthbearer_expected_issuer-property'} (string, Pattern: `^[^\r\n]*$`, MaxLength: 128). Optional setting for the broker to use to verify that the JWT was created by the expected issuer. 
-- [`sasl_oauthbearer_jwks_endpoint_url`](#spec.userConfig.kafka.sasl_oauthbearer_jwks_endpoint_url-property){: name='spec.userConfig.kafka.sasl_oauthbearer_jwks_endpoint_url-property'} (string, MaxLength: 2048). OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. -- [`sasl_oauthbearer_sub_claim_name`](#spec.userConfig.kafka.sasl_oauthbearer_sub_claim_name-property){: name='spec.userConfig.kafka.sasl_oauthbearer_sub_claim_name-property'} (string, Pattern: `^[^\r\n]*$`, MaxLength: 128). Name of the scope from which to extract the subject claim from the JWT. Defaults to sub. -- [`socket_request_max_bytes`](#spec.userConfig.kafka.socket_request_max_bytes-property){: name='spec.userConfig.kafka.socket_request_max_bytes-property'} (integer, Minimum: 10485760, Maximum: 209715200). The maximum number of bytes in a socket request (defaults to 104857600). -- [`transaction_partition_verification_enable`](#spec.userConfig.kafka.transaction_partition_verification_enable-property){: name='spec.userConfig.kafka.transaction_partition_verification_enable-property'} (boolean). Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. -- [`transaction_remove_expired_transaction_cleanup_interval_ms`](#spec.userConfig.kafka.transaction_remove_expired_transaction_cleanup_interval_ms-property){: name='spec.userConfig.kafka.transaction_remove_expired_transaction_cleanup_interval_ms-property'} (integer, Minimum: 600000, Maximum: 3600000). The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)). -- [`transaction_state_log_segment_bytes`](#spec.userConfig.kafka.transaction_state_log_segment_bytes-property){: name='spec.userConfig.kafka.transaction_state_log_segment_bytes-property'} (integer, Minimum: 1048576, Maximum: 2147483647). The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)). +- [`auto_create_topics_enable`](#spec.userConfig.kafka.auto_create_topics_enable-property){: name='spec.userConfig.kafka.auto_create_topics_enable-property'} (boolean). Enable auto-creation of topics. (Default: true). +- [`compression_type`](#spec.userConfig.kafka.compression_type-property){: name='spec.userConfig.kafka.compression_type-property'} (string, Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`). Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer. (Default: producer). +- [`connections_max_idle_ms`](#spec.userConfig.kafka.connections_max_idle_ms-property){: name='spec.userConfig.kafka.connections_max_idle_ms-property'} (integer, Minimum: 1000, Maximum: 3600000). Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). +- [`default_replication_factor`](#spec.userConfig.kafka.default_replication_factor-property){: name='spec.userConfig.kafka.default_replication_factor-property'} (integer, Minimum: 1, Maximum: 10). Replication factor for auto-created topics (Default: 3).
+- [`group_initial_rebalance_delay_ms`](#spec.userConfig.kafka.group_initial_rebalance_delay_ms-property){: name='spec.userConfig.kafka.group_initial_rebalance_delay_ms-property'} (integer, Minimum: 0, Maximum: 300000). The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). +- [`group_max_session_timeout_ms`](#spec.userConfig.kafka.group_max_session_timeout_ms-property){: name='spec.userConfig.kafka.group_max_session_timeout_ms-property'} (integer, Minimum: 0, Maximum: 1800000). The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 1800000 ms (30 minutes)). +- [`group_min_session_timeout_ms`](#spec.userConfig.kafka.group_min_session_timeout_ms-property){: name='spec.userConfig.kafka.group_min_session_timeout_ms-property'} (integer, Minimum: 0, Maximum: 60000). The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). +- [`log_cleaner_delete_retention_ms`](#spec.userConfig.kafka.log_cleaner_delete_retention_ms-property){: name='spec.userConfig.kafka.log_cleaner_delete_retention_ms-property'} (integer, Minimum: 0, Maximum: 315569260000). How long are delete records retained? (Default: 86400000 (1 day)). +- [`log_cleaner_max_compaction_lag_ms`](#spec.userConfig.kafka.log_cleaner_max_compaction_lag_ms-property){: name='spec.userConfig.kafka.log_cleaner_max_compaction_lag_ms-property'} (integer, Minimum: 30000). The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)). +- [`log_cleaner_min_cleanable_ratio`](#spec.userConfig.kafka.log_cleaner_min_cleanable_ratio-property){: name='spec.userConfig.kafka.log_cleaner_min_cleanable_ratio-property'} (number, Minimum: 0.2, Maximum: 0.9). Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). +- [`log_cleaner_min_compaction_lag_ms`](#spec.userConfig.kafka.log_cleaner_min_compaction_lag_ms-property){: name='spec.userConfig.kafka.log_cleaner_min_compaction_lag_ms-property'} (integer, Minimum: 0). The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms). +- [`log_cleanup_policy`](#spec.userConfig.kafka.log_cleanup_policy-property){: name='spec.userConfig.kafka.log_cleanup_policy-property'} (string, Enum: `delete`, `compact`, `compact,delete`). The default cleanup policy for segments beyond the retention window (Default: delete). +- [`log_flush_interval_messages`](#spec.userConfig.kafka.log_flush_interval_messages-property){: name='spec.userConfig.kafka.log_flush_interval_messages-property'} (integer, Minimum: 1).
The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). +- [`log_flush_interval_ms`](#spec.userConfig.kafka.log_flush_interval_ms-property){: name='spec.userConfig.kafka.log_flush_interval_ms-property'} (integer, Minimum: 0). The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null). +- [`log_index_interval_bytes`](#spec.userConfig.kafka.log_index_interval_bytes-property){: name='spec.userConfig.kafka.log_index_interval_bytes-property'} (integer, Minimum: 0, Maximum: 104857600). The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). +- [`log_index_size_max_bytes`](#spec.userConfig.kafka.log_index_size_max_bytes-property){: name='spec.userConfig.kafka.log_index_size_max_bytes-property'} (integer, Minimum: 1048576, Maximum: 104857600). The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). +- [`log_local_retention_bytes`](#spec.userConfig.kafka.log_local_retention_bytes-property){: name='spec.userConfig.kafka.log_local_retention_bytes-property'} (integer, Minimum: -2). The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2). +- [`log_local_retention_ms`](#spec.userConfig.kafka.log_local_retention_ms-property){: name='spec.userConfig.kafka.log_local_retention_ms-property'} (integer, Minimum: -2). The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2). +- [`log_message_downconversion_enable`](#spec.userConfig.kafka.log_message_downconversion_enable-property){: name='spec.userConfig.kafka.log_message_downconversion_enable-property'} (boolean). This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true). +- [`log_message_timestamp_difference_max_ms`](#spec.userConfig.kafka.log_message_timestamp_difference_max_ms-property){: name='spec.userConfig.kafka.log_message_timestamp_difference_max_ms-property'} (integer, Minimum: 0). The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)). +- [`log_message_timestamp_type`](#spec.userConfig.kafka.log_message_timestamp_type-property){: name='spec.userConfig.kafka.log_message_timestamp_type-property'} (string, Enum: `CreateTime`, `LogAppendTime`). Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime). +- [`log_preallocate`](#spec.userConfig.kafka.log_preallocate-property){: name='spec.userConfig.kafka.log_preallocate-property'} (boolean). Should pre-allocate the file when creating a new segment? (Default: false). +- [`log_retention_bytes`](#spec.userConfig.kafka.log_retention_bytes-property){: name='spec.userConfig.kafka.log_retention_bytes-property'} (integer, Minimum: -1). The maximum size of the log before deleting messages (Default: -1).
+- [`log_retention_hours`](#spec.userConfig.kafka.log_retention_hours-property){: name='spec.userConfig.kafka.log_retention_hours-property'} (integer, Minimum: -1, Maximum: 2147483647). The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)). +- [`log_retention_ms`](#spec.userConfig.kafka.log_retention_ms-property){: name='spec.userConfig.kafka.log_retention_ms-property'} (integer, Minimum: -1). The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies). +- [`log_roll_jitter_ms`](#spec.userConfig.kafka.log_roll_jitter_ms-property){: name='spec.userConfig.kafka.log_roll_jitter_ms-property'} (integer, Minimum: 0). The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null). +- [`log_roll_ms`](#spec.userConfig.kafka.log_roll_ms-property){: name='spec.userConfig.kafka.log_roll_ms-property'} (integer, Minimum: 1). The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)). +- [`log_segment_bytes`](#spec.userConfig.kafka.log_segment_bytes-property){: name='spec.userConfig.kafka.log_segment_bytes-property'} (integer, Minimum: 10485760, Maximum: 1073741824). The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)). +- [`log_segment_delete_delay_ms`](#spec.userConfig.kafka.log_segment_delete_delay_ms-property){: name='spec.userConfig.kafka.log_segment_delete_delay_ms-property'} (integer, Minimum: 0, Maximum: 3600000). The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). +- [`max_connections_per_ip`](#spec.userConfig.kafka.max_connections_per_ip-property){: name='spec.userConfig.kafka.max_connections_per_ip-property'} (integer, Minimum: 256, Maximum: 2147483647). The maximum number of connections allowed from each ip address (Default: 2147483647). +- [`max_incremental_fetch_session_cache_slots`](#spec.userConfig.kafka.max_incremental_fetch_session_cache_slots-property){: name='spec.userConfig.kafka.max_incremental_fetch_session_cache_slots-property'} (integer, Minimum: 1000, Maximum: 10000). The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). +- [`message_max_bytes`](#spec.userConfig.kafka.message_max_bytes-property){: name='spec.userConfig.kafka.message_max_bytes-property'} (integer, Minimum: 0, Maximum: 100001200). The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). +- [`min_insync_replicas`](#spec.userConfig.kafka.min_insync_replicas-property){: name='spec.userConfig.kafka.min_insync_replicas-property'} (integer, Minimum: 1, Maximum: 7). When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). +- [`num_partitions`](#spec.userConfig.kafka.num_partitions-property){: name='spec.userConfig.kafka.num_partitions-property'} (integer, Minimum: 1, Maximum: 1000). Number of partitions for auto-created topics (Default: 1). +- [`offsets_retention_minutes`](#spec.userConfig.kafka.offsets_retention_minutes-property){: name='spec.userConfig.kafka.offsets_retention_minutes-property'} (integer, Minimum: 1, Maximum: 2147483647). 
Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). +- [`producer_purgatory_purge_interval_requests`](#spec.userConfig.kafka.producer_purgatory_purge_interval_requests-property){: name='spec.userConfig.kafka.producer_purgatory_purge_interval_requests-property'} (integer, Minimum: 10, Maximum: 10000). The purge interval (in number of requests) of the producer request purgatory (Default: 1000). +- [`replica_fetch_max_bytes`](#spec.userConfig.kafka.replica_fetch_max_bytes-property){: name='spec.userConfig.kafka.replica_fetch_max_bytes-property'} (integer, Minimum: 1048576, Maximum: 104857600). The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)). +- [`replica_fetch_response_max_bytes`](#spec.userConfig.kafka.replica_fetch_response_max_bytes-property){: name='spec.userConfig.kafka.replica_fetch_response_max_bytes-property'} (integer, Minimum: 10485760, Maximum: 1048576000). Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)). +- [`sasl_oauthbearer_expected_audience`](#spec.userConfig.kafka.sasl_oauthbearer_expected_audience-property){: name='spec.userConfig.kafka.sasl_oauthbearer_expected_audience-property'} (string, Pattern: `^[^\r\n]*$`, MaxLength: 128). The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null). +- [`sasl_oauthbearer_expected_issuer`](#spec.userConfig.kafka.sasl_oauthbearer_expected_issuer-property){: name='spec.userConfig.kafka.sasl_oauthbearer_expected_issuer-property'} (string, Pattern: `^[^\r\n]*$`, MaxLength: 128). Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null). +- [`sasl_oauthbearer_jwks_endpoint_url`](#spec.userConfig.kafka.sasl_oauthbearer_jwks_endpoint_url-property){: name='spec.userConfig.kafka.sasl_oauthbearer_jwks_endpoint_url-property'} (string, MaxLength: 2048). OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null). +- [`sasl_oauthbearer_sub_claim_name`](#spec.userConfig.kafka.sasl_oauthbearer_sub_claim_name-property){: name='spec.userConfig.kafka.sasl_oauthbearer_sub_claim_name-property'} (string, Pattern: `^[^\r\n]*\S[^\r\n]*$`, MaxLength: 128). Name of the scope from which to extract the subject claim from the JWT. (Default: sub). +- [`socket_request_max_bytes`](#spec.userConfig.kafka.socket_request_max_bytes-property){: name='spec.userConfig.kafka.socket_request_max_bytes-property'} (integer, Minimum: 10485760, Maximum: 209715200). The maximum number of bytes in a socket request (Default: 104857600 bytes). +- [`transaction_partition_verification_enable`](#spec.userConfig.kafka.transaction_partition_verification_enable-property){: name='spec.userConfig.kafka.transaction_partition_verification_enable-property'} (boolean).
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false). +- [`transaction_remove_expired_transaction_cleanup_interval_ms`](#spec.userConfig.kafka.transaction_remove_expired_transaction_cleanup_interval_ms-property){: name='spec.userConfig.kafka.transaction_remove_expired_transaction_cleanup_interval_ms-property'} (integer, Minimum: 600000, Maximum: 3600000). The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). +- [`transaction_state_log_segment_bytes`](#spec.userConfig.kafka.transaction_state_log_segment_bytes-property){: name='spec.userConfig.kafka.transaction_state_log_segment_bytes-property'} (integer, Minimum: 1048576, Maximum: 2147483647). The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). ### kafka_authentication_methods {: #spec.userConfig.kafka_authentication_methods } diff --git a/docs/docs/api-reference/mysql.md b/docs/docs/api-reference/mysql.md index 6aa16a27..8fe49b8f 100644 --- a/docs/docs/api-reference/mysql.md +++ b/docs/docs/api-reference/mysql.md @@ -252,6 +252,7 @@ Migrate data from existing server. - [`dbname`](#spec.userConfig.migration.dbname-property){: name='spec.userConfig.migration.dbname-property'} (string, MaxLength: 63). Database name for bootstrapping the initial connection. - [`ignore_dbs`](#spec.userConfig.migration.ignore_dbs-property){: name='spec.userConfig.migration.ignore_dbs-property'} (string, MaxLength: 2048). Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). +- [`ignore_roles`](#spec.userConfig.migration.ignore_roles-property){: name='spec.userConfig.migration.ignore_roles-property'} (string, MaxLength: 2048). Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). - [`method`](#spec.userConfig.migration.method-property){: name='spec.userConfig.migration.method-property'} (string, Enum: `dump`, `replication`). The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types). - [`password`](#spec.userConfig.migration.password-property){: name='spec.userConfig.migration.password-property'} (string, MaxLength: 256). Password for authentication with the server where to migrate data from. - [`ssl`](#spec.userConfig.migration.ssl-property){: name='spec.userConfig.migration.ssl-property'} (boolean). The server where to migrate data from is secured with SSL. diff --git a/docs/docs/api-reference/postgresql.md b/docs/docs/api-reference/postgresql.md index 1040a52e..cfdb3fc6 100644 --- a/docs/docs/api-reference/postgresql.md +++ b/docs/docs/api-reference/postgresql.md @@ -258,6 +258,7 @@ Migrate data from existing server. - [`dbname`](#spec.userConfig.migration.dbname-property){: name='spec.userConfig.migration.dbname-property'} (string, MaxLength: 63). Database name for bootstrapping the initial connection. - [`ignore_dbs`](#spec.userConfig.migration.ignore_dbs-property){: name='spec.userConfig.migration.ignore_dbs-property'} (string, MaxLength: 2048). Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). 
+- [`ignore_roles`](#spec.userConfig.migration.ignore_roles-property){: name='spec.userConfig.migration.ignore_roles-property'} (string, MaxLength: 2048). Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). - [`method`](#spec.userConfig.migration.method-property){: name='spec.userConfig.migration.method-property'} (string, Enum: `dump`, `replication`). The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types). - [`password`](#spec.userConfig.migration.password-property){: name='spec.userConfig.migration.password-property'} (string, MaxLength: 256). Password for authentication with the server where to migrate data from. - [`ssl`](#spec.userConfig.migration.ssl-property){: name='spec.userConfig.migration.ssl-property'} (boolean). The server where to migrate data from is secured with SSL. @@ -371,6 +372,7 @@ PGBouncer connection pooling settings. - [`autodb_pool_mode`](#spec.userConfig.pgbouncer.autodb_pool_mode-property){: name='spec.userConfig.pgbouncer.autodb_pool_mode-property'} (string, Enum: `session`, `transaction`, `statement`). PGBouncer pool mode. - [`autodb_pool_size`](#spec.userConfig.pgbouncer.autodb_pool_size-property){: name='spec.userConfig.pgbouncer.autodb_pool_size-property'} (integer, Minimum: 0, Maximum: 10000). If non-zero then create automatically a pool of that size per user when a pool doesn't exist. - [`ignore_startup_parameters`](#spec.userConfig.pgbouncer.ignore_startup_parameters-property){: name='spec.userConfig.pgbouncer.ignore_startup_parameters-property'} (array of strings, MaxItems: 32). List of parameters to ignore when given in startup packet. +- [`max_prepared_statements`](#spec.userConfig.pgbouncer.max_prepared_statements-property){: name='spec.userConfig.pgbouncer.max_prepared_statements-property'} (integer, Minimum: 0, Maximum: 3000). PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. - [`min_pool_size`](#spec.userConfig.pgbouncer.min_pool_size-property){: name='spec.userConfig.pgbouncer.min_pool_size-property'} (integer, Minimum: 0, Maximum: 10000). Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. - [`server_idle_timeout`](#spec.userConfig.pgbouncer.server_idle_timeout-property){: name='spec.userConfig.pgbouncer.server_idle_timeout-property'} (integer, Minimum: 0, Maximum: 86400). If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. [seconds]. - [`server_lifetime`](#spec.userConfig.pgbouncer.server_lifetime-property){: name='spec.userConfig.pgbouncer.server_lifetime-property'} (integer, Minimum: 60, Maximum: 86400). The pooler will close an unused server connection that has been connected longer than this. [seconds]. diff --git a/docs/docs/api-reference/redis.md b/docs/docs/api-reference/redis.md index e7e4cc45..f68bf1b9 100644 --- a/docs/docs/api-reference/redis.md +++ b/docs/docs/api-reference/redis.md @@ -247,6 +247,7 @@ Migrate data from existing server. 
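To illustrate where the new `ignore_roles` field sits in the migration block described here, a hedged sketch follows; the source host, port, and role names are invented placeholders, not values from this change:

```yaml
apiVersion: aiven.io/v1alpha1
kind: PostgreSQL
metadata:
  name: example-pg-target     # hypothetical resource name
spec:
  project: example-project    # placeholder Aiven project
  cloudName: google-europe-west1
  plan: startup-4
  userConfig:
    migration:
      host: source-db.example.com   # placeholder source server
      port: 5432
      dbname: defaultdb
      # New field: roles to skip during migration
      # (supported by PostgreSQL only at the moment)
      ignore_roles: legacy_admin,replication_bot
```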
- [`dbname`](#spec.userConfig.migration.dbname-property){: name='spec.userConfig.migration.dbname-property'} (string, MaxLength: 63). Database name for bootstrapping the initial connection. - [`ignore_dbs`](#spec.userConfig.migration.ignore_dbs-property){: name='spec.userConfig.migration.ignore_dbs-property'} (string, MaxLength: 2048). Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). +- [`ignore_roles`](#spec.userConfig.migration.ignore_roles-property){: name='spec.userConfig.migration.ignore_roles-property'} (string, MaxLength: 2048). Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). - [`method`](#spec.userConfig.migration.method-property){: name='spec.userConfig.migration.method-property'} (string, Enum: `dump`, `replication`). The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types). - [`password`](#spec.userConfig.migration.password-property){: name='spec.userConfig.migration.password-property'} (string, MaxLength: 256). Password for authentication with the server where to migrate data from. - [`ssl`](#spec.userConfig.migration.ssl-property){: name='spec.userConfig.migration.ssl-property'} (boolean). The server where to migrate data from is secured with SSL. diff --git a/docs/docs/api-reference/serviceintegration.md b/docs/docs/api-reference/serviceintegration.md index 7a5f59e6..1a0ed1cf 100644 --- a/docs/docs/api-reference/serviceintegration.md +++ b/docs/docs/api-reference/serviceintegration.md @@ -314,7 +314,9 @@ Table to create. - [`max_rows_per_message`](#spec.clickhouseKafka.tables.max_rows_per_message-property){: name='spec.clickhouseKafka.tables.max_rows_per_message-property'} (integer, Minimum: 1, Maximum: 1000000000). The maximum number of rows produced in one kafka message for row-based formats. - [`num_consumers`](#spec.clickhouseKafka.tables.num_consumers-property){: name='spec.clickhouseKafka.tables.num_consumers-property'} (integer, Minimum: 1, Maximum: 10). The number of consumers per table per replica. - [`poll_max_batch_size`](#spec.clickhouseKafka.tables.poll_max_batch_size-property){: name='spec.clickhouseKafka.tables.poll_max_batch_size-property'} (integer, Minimum: 0, Maximum: 1000000000). Maximum amount of messages to be polled in a single Kafka poll. +- [`poll_max_timeout_ms`](#spec.clickhouseKafka.tables.poll_max_timeout_ms-property){: name='spec.clickhouseKafka.tables.poll_max_timeout_ms-property'} (integer, Minimum: 0, Maximum: 30000). Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). - [`skip_broken_messages`](#spec.clickhouseKafka.tables.skip_broken_messages-property){: name='spec.clickhouseKafka.tables.skip_broken_messages-property'} (integer, Minimum: 0, Maximum: 1000000000). Skip at least this number of broken messages from Kafka topic per block. +- [`thread_per_consumer`](#spec.clickhouseKafka.tables.thread_per_consumer-property){: name='spec.clickhouseKafka.tables.thread_per_consumer-property'} (boolean). Provide an independent thread for each consumer. All consumers run in the same thread by default. #### columns {: #spec.clickhouseKafka.tables.columns }