From 2c3aa2afa091db63a0122e6851b6a4e2316047df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" Date: Mon, 19 Aug 2024 00:25:42 +0000 Subject: [PATCH] chore(userconfigs): generate files --- CHANGELOG.md | 10 + .../userconfig/service/grafana/grafana.go | 3 + .../service/grafana/zz_generated.deepcopy.go | 5 + .../userconfig/service/kafka/kafka.go | 17 +- .../service/kafka/zz_generated.deepcopy.go | 35 ++++ .../service/opensearch/opensearch.go | 132 ++++++++++++ .../opensearch/zz_generated.deepcopy.go | 160 +++++++++++++++ .../templates/aiven.io_grafanas.yaml | 5 + .../templates/aiven.io_kafkas.yaml | 15 +- .../templates/aiven.io_opensearches.yaml | 191 ++++++++++++++++++ config/crd/bases/aiven.io_grafanas.yaml | 5 + config/crd/bases/aiven.io_kafkas.yaml | 15 +- config/crd/bases/aiven.io_opensearches.yaml | 191 ++++++++++++++++++ docs/docs/api-reference/grafana.md | 1 + docs/docs/api-reference/kafka.md | 15 +- docs/docs/api-reference/opensearch.md | 79 +++++++- 16 files changed, 872 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a5ad3fd..dc61cc0b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,16 @@ statement pooling modes when max_prepared_statements is set to a non-zero value - Add `Redis` field `userConfig.migration.ignore_roles`, type `string`: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment) +- Add `Grafana` field `userConfig.wal`, type `boolean`: Setting to enable/disable Write-Ahead Logging. + The default value is false (disabled) +- Add `Kafka` field `userConfig.kafka_sasl_mechanisms`, type `object`: Kafka SASL mechanisms +- Add `OpenSearch` field `userConfig.azure_migration`, type `object`: +- Add `OpenSearch` field `userConfig.gcs_migration`, type `object`: +- Add `OpenSearch` field `userConfig.index_rollup`, type `object`: Index rollup settings +- Add `OpenSearch` field `userConfig.s3_migration`, type `object`: +- Change `OpenSearch` field `userConfig.openid.connect_url`: pattern `^[^\r\n]*$` +- Change `OpenSearch` field `userConfig.opensearch.script_max_compilations_rate`: pattern `^[^\r\n]*$` +- Change `OpenSearch` field `userConfig.saml.idp_metadata_url`: pattern `^[^\r\n]*$` ## v0.24.0 - 2024-07-16 diff --git a/api/v1alpha1/userconfig/service/grafana/grafana.go b/api/v1alpha1/userconfig/service/grafana/grafana.go index 402ce754..6846edd3 100644 --- a/api/v1alpha1/userconfig/service/grafana/grafana.go +++ b/api/v1alpha1/userconfig/service/grafana/grafana.go @@ -441,4 +441,7 @@ type GrafanaUserConfig struct { // Users with view-only permission can edit but not save dashboards ViewersCanEdit *bool `groups:"create,update" json:"viewers_can_edit,omitempty"` + + // Setting to enable/disable Write-Ahead Logging. The default value is false (disabled). + Wal *bool `groups:"create,update" json:"wal,omitempty"` } diff --git a/api/v1alpha1/userconfig/service/grafana/zz_generated.deepcopy.go b/api/v1alpha1/userconfig/service/grafana/zz_generated.deepcopy.go index 7ed02b38..baebdeb9 100644 --- a/api/v1alpha1/userconfig/service/grafana/zz_generated.deepcopy.go +++ b/api/v1alpha1/userconfig/service/grafana/zz_generated.deepcopy.go @@ -465,6 +465,11 @@ func (in *GrafanaUserConfig) DeepCopyInto(out *GrafanaUserConfig) { *out = new(bool) **out = **in } + if in.Wal != nil { + in, out := &in.Wal, &out.Wal + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrafanaUserConfig. 
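For reference, a minimal sketch of how the new Grafana `wal` field could be set on a `Grafana` resource. Only the field name, type, and default come from the generated schema in this patch; the metadata, `project`, and `plan` values are placeholders, and other spec fields a real manifest would need are omitted:

```yaml
apiVersion: aiven.io/v1alpha1
kind: Grafana
metadata:
  name: example-grafana        # placeholder name
spec:
  project: example-project     # placeholder Aiven project
  plan: startup-4              # placeholder service plan
  userConfig:
    # New field from this patch: enable Write-Ahead Logging (defaults to false when unset)
    wal: true
```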
diff --git a/api/v1alpha1/userconfig/service/kafka/kafka.go b/api/v1alpha1/userconfig/service/kafka/kafka.go index fc6b96c9..c09b376b 100644 --- a/api/v1alpha1/userconfig/service/kafka/kafka.go +++ b/api/v1alpha1/userconfig/service/kafka/kafka.go @@ -216,7 +216,7 @@ type Kafka struct { // The maximum number of bytes in a socket request (Default: 104857600 bytes). SocketRequestMaxBytes *int `groups:"create,update" json:"socket_request_max_bytes,omitempty"` - // Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false) + // Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true) TransactionPartitionVerificationEnable *bool `groups:"create,update" json:"transaction_partition_verification_enable,omitempty"` // +kubebuilder:validation:Minimum=600000 @@ -416,6 +416,18 @@ type KafkaRestConfig struct { SimpleconsumerPoolSizeMax *int `groups:"create,update" json:"simpleconsumer_pool_size_max,omitempty"` } +// Kafka SASL mechanisms +type KafkaSaslMechanisms struct { + // Enable PLAIN mechanism + Plain *bool `groups:"create,update" json:"plain,omitempty"` + + // Enable SCRAM-SHA-256 mechanism + ScramSha256 *bool `groups:"create,update" json:"scram_sha_256,omitempty"` + + // Enable SCRAM-SHA-512 mechanism + ScramSha512 *bool `groups:"create,update" json:"scram_sha_512,omitempty"` +} + // Allow access to selected service ports from private networks type PrivateAccess struct { // Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations @@ -546,6 +558,9 @@ type KafkaUserConfig struct { // Kafka REST configuration KafkaRestConfig *KafkaRestConfig `groups:"create,update" json:"kafka_rest_config,omitempty"` + // Kafka SASL mechanisms + KafkaSaslMechanisms *KafkaSaslMechanisms `groups:"create,update" json:"kafka_sasl_mechanisms,omitempty"` + // +kubebuilder:validation:Enum="3.4";"3.5";"3.6";"3.7" // Kafka major version KafkaVersion *string `groups:"create,update" json:"kafka_version,omitempty"` diff --git a/api/v1alpha1/userconfig/service/kafka/zz_generated.deepcopy.go b/api/v1alpha1/userconfig/service/kafka/zz_generated.deepcopy.go index 616118fa..09380f9d 100644 --- a/api/v1alpha1/userconfig/service/kafka/zz_generated.deepcopy.go +++ b/api/v1alpha1/userconfig/service/kafka/zz_generated.deepcopy.go @@ -526,6 +526,36 @@ func (in *KafkaRestConfig) DeepCopy() *KafkaRestConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSaslMechanisms) DeepCopyInto(out *KafkaSaslMechanisms) { + *out = *in + if in.Plain != nil { + in, out := &in.Plain, &out.Plain + *out = new(bool) + **out = **in + } + if in.ScramSha256 != nil { + in, out := &in.ScramSha256, &out.ScramSha256 + *out = new(bool) + **out = **in + } + if in.ScramSha512 != nil { + in, out := &in.ScramSha512, &out.ScramSha512 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSaslMechanisms. +func (in *KafkaSaslMechanisms) DeepCopy() *KafkaSaslMechanisms { + if in == nil { + return nil + } + out := new(KafkaSaslMechanisms) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KafkaUserConfig) DeepCopyInto(out *KafkaUserConfig) { *out = *in @@ -606,6 +636,11 @@ func (in *KafkaUserConfig) DeepCopyInto(out *KafkaUserConfig) { *out = new(KafkaRestConfig) (*in).DeepCopyInto(*out) } + if in.KafkaSaslMechanisms != nil { + in, out := &in.KafkaSaslMechanisms, &out.KafkaSaslMechanisms + *out = new(KafkaSaslMechanisms) + (*in).DeepCopyInto(*out) + } if in.KafkaVersion != nil { in, out := &in.KafkaVersion, &out.KafkaVersion *out = new(string) diff --git a/api/v1alpha1/userconfig/service/opensearch/opensearch.go b/api/v1alpha1/userconfig/service/opensearch/opensearch.go index 5e1e90dd..d18d3f56 100644 --- a/api/v1alpha1/userconfig/service/opensearch/opensearch.go +++ b/api/v1alpha1/userconfig/service/opensearch/opensearch.go @@ -3,6 +3,67 @@ package opensearchuserconfig +type AzureMigration struct { + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // Azure account name + Account string `groups:"create,update" json:"account"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // The path to the repository data within its container. The value of this setting should not start or end with a / + BasePath string `groups:"create,update" json:"base_path"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository + ChunkSize *string `groups:"create,update" json:"chunk_size,omitempty"` + + // when set to true metadata files are stored in compressed format + Compress *bool `groups:"create,update" json:"compress,omitempty"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // Azure container name + Container string `groups:"create,update" json:"container"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // Defines the DNS suffix for Azure Storage endpoints. + EndpointSuffix *string `groups:"create,update" json:"endpoint_suffix,omitempty"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // Azure account secret key. One of key or sas_token should be specified + Key *string `groups:"create,update" json:"key,omitempty"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // A shared access signatures (SAS) token. One of key or sas_token should be specified + SasToken *string `groups:"create,update" json:"sas_token,omitempty"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // The snapshot name to restore from + SnapshotName string `groups:"create,update" json:"snapshot_name"` +} +type GcsMigration struct { + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // The path to the repository data within its container. The value of this setting should not start or end with a / + BasePath string `groups:"create,update" json:"base_path"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // The path to the repository data within its container + Bucket string `groups:"create,update" json:"bucket"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // Big files can be broken down into chunks during snapshotting if needed. 
Should be the same as for the 3rd party repository + ChunkSize *string `groups:"create,update" json:"chunk_size,omitempty"` + + // when set to true metadata files are stored in compressed format + Compress *bool `groups:"create,update" json:"compress,omitempty"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // Google Cloud Storage credentials file content + Credentials string `groups:"create,update" json:"credentials"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // The snapshot name to restore from + SnapshotName string `groups:"create,update" json:"snapshot_name"` +} + // Allows you to create glob style patterns and set a max number of indexes matching this pattern you want to keep. Creating indexes exceeding this value will cause the oldest one to get deleted. You could for example create a pattern looking like 'logs.?' and then create index logs.1, logs.2 etc, it will delete logs.1 once you create logs.6. Do note 'logs.?' does not apply to logs.10. Note: Setting max_index_count to 0 will do nothing and the pattern gets ignored. type IndexPatterns struct { // +kubebuilder:validation:Minimum=0 @@ -19,6 +80,26 @@ type IndexPatterns struct { SortingAlgorithm *string `groups:"create,update" json:"sorting_algorithm,omitempty"` } +// Index rollup settings +type IndexRollup struct { + // Whether rollups are enabled in OpenSearch Dashboards. Defaults to true. + RollupDashboardsEnabled *bool `groups:"create,update" json:"rollup_dashboards_enabled,omitempty"` + + // Whether the rollup plugin is enabled. Defaults to true. + RollupEnabled *bool `groups:"create,update" json:"rollup_enabled,omitempty"` + + // +kubebuilder:validation:Minimum=1 + // How many retries the plugin should attempt for failed rollup jobs. Defaults to 5. + RollupSearchBackoffCount *int `groups:"create,update" json:"rollup_search_backoff_count,omitempty"` + + // +kubebuilder:validation:Minimum=1 + // The backoff time between retries for failed rollup jobs. Defaults to 1000ms. + RollupSearchBackoffMillis *int `groups:"create,update" json:"rollup_search_backoff_millis,omitempty"` + + // Whether OpenSearch should return all jobs that match all specified search terms. If disabled, OpenSearch returns just one, as opposed to all, of the jobs that matches the search terms. Defaults to false. + RollupSearchSearchAllJobs *bool `groups:"create,update" json:"rollup_search_search_all_jobs,omitempty"` +} + // Template settings for all new indexes type IndexTemplate struct { // +kubebuilder:validation:Minimum=0 @@ -63,6 +144,7 @@ type Openid struct { ClientSecret string `groups:"create,update" json:"client_secret"` // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` // The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings. ConnectUrl string `groups:"create,update" json:"connect_url"` @@ -329,6 +411,7 @@ type Opensearch struct { ReindexRemoteWhitelist []string `groups:"create,update" json:"reindex_remote_whitelist,omitempty"` // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` // Script compilation circuit breaker limits the number of inline script compilations within a period of time. 
Default is use-context ScriptMaxCompilationsRate *string `groups:"create,update" json:"script_max_compilations_rate,omitempty"` @@ -444,6 +527,45 @@ type PublicAccess struct { // Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network Prometheus *bool `groups:"create,update" json:"prometheus,omitempty"` } +type S3Migration struct { + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // AWS Access key + AccessKey string `groups:"create,update" json:"access_key"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // The path to the repository data within its container. The value of this setting should not start or end with a / + BasePath string `groups:"create,update" json:"base_path"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // S3 bucket name + Bucket string `groups:"create,update" json:"bucket"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository + ChunkSize *string `groups:"create,update" json:"chunk_size,omitempty"` + + // when set to true metadata files are stored in compressed format + Compress *bool `groups:"create,update" json:"compress,omitempty"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint + Endpoint *string `groups:"create,update" json:"endpoint,omitempty"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // S3 region + Region string `groups:"create,update" json:"region"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // AWS secret key + SecretKey string `groups:"create,update" json:"secret_key"` + + // When set to true files are encrypted on server side + ServerSideEncryption *bool `groups:"create,update" json:"server_side_encryption,omitempty"` + + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` + // The snapshot name to restore from + SnapshotName string `groups:"create,update" json:"snapshot_name"` +} // OpenSearch SAML configuration type Saml struct { @@ -458,6 +580,7 @@ type Saml struct { // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:Pattern=`^[^\r\n]*$` // The URL of the SAML metadata for the Identity Provider (IdP). This is used to configure SAML-based authentication with the IdP. IdpMetadataUrl string `groups:"create,update" json:"idp_metadata_url"` @@ -488,6 +611,8 @@ type OpensearchUserConfig struct { // Additional Cloud Regions for Backup Replication AdditionalBackupRegions []string `groups:"create,update" json:"additional_backup_regions,omitempty"` + AzureMigration *AzureMigration `groups:"create,update" json:"azure_migration,omitempty"` + // +kubebuilder:validation:MaxLength=255 // Serve the web frontend using a custom CNAME pointing to the Aiven DNS name CustomDomain *string `groups:"create,update" json:"custom_domain,omitempty"` @@ -495,10 +620,15 @@ type OpensearchUserConfig struct { // DEPRECATED: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated. 
DisableReplicationFactorAdjustment *bool `groups:"create,update" json:"disable_replication_factor_adjustment,omitempty"` + GcsMigration *GcsMigration `groups:"create,update" json:"gcs_migration,omitempty"` + // +kubebuilder:validation:MaxItems=512 // Index patterns IndexPatterns []*IndexPatterns `groups:"create,update" json:"index_patterns,omitempty"` + // Index rollup settings + IndexRollup *IndexRollup `groups:"create,update" json:"index_rollup,omitempty"` + // Template settings for all new indexes IndexTemplate *IndexTemplate `groups:"create,update" json:"index_template,omitempty"` @@ -546,6 +676,8 @@ type OpensearchUserConfig struct { // Name of the basebackup to restore in forked service RecoveryBasebackupName *string `groups:"create,update" json:"recovery_basebackup_name,omitempty"` + S3Migration *S3Migration `groups:"create,update" json:"s3_migration,omitempty"` + // OpenSearch SAML configuration Saml *Saml `groups:"create,update" json:"saml,omitempty"` diff --git a/api/v1alpha1/userconfig/service/opensearch/zz_generated.deepcopy.go b/api/v1alpha1/userconfig/service/opensearch/zz_generated.deepcopy.go index 28d175ee..8d3b981f 100644 --- a/api/v1alpha1/userconfig/service/opensearch/zz_generated.deepcopy.go +++ b/api/v1alpha1/userconfig/service/opensearch/zz_generated.deepcopy.go @@ -31,6 +31,71 @@ func (in *AuthFailureListeners) DeepCopy() *AuthFailureListeners { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMigration) DeepCopyInto(out *AzureMigration) { + *out = *in + if in.ChunkSize != nil { + in, out := &in.ChunkSize, &out.ChunkSize + *out = new(string) + **out = **in + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } + if in.EndpointSuffix != nil { + in, out := &in.EndpointSuffix, &out.EndpointSuffix + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.SasToken != nil { + in, out := &in.SasToken, &out.SasToken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMigration. +func (in *AzureMigration) DeepCopy() *AzureMigration { + if in == nil { + return nil + } + out := new(AzureMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GcsMigration) DeepCopyInto(out *GcsMigration) { + *out = *in + if in.ChunkSize != nil { + in, out := &in.ChunkSize, &out.ChunkSize + *out = new(string) + **out = **in + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GcsMigration. +func (in *GcsMigration) DeepCopy() *GcsMigration { + if in == nil { + return nil + } + out := new(GcsMigration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IndexPatterns) DeepCopyInto(out *IndexPatterns) { *out = *in @@ -51,6 +116,46 @@ func (in *IndexPatterns) DeepCopy() *IndexPatterns { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexRollup) DeepCopyInto(out *IndexRollup) { + *out = *in + if in.RollupDashboardsEnabled != nil { + in, out := &in.RollupDashboardsEnabled, &out.RollupDashboardsEnabled + *out = new(bool) + **out = **in + } + if in.RollupEnabled != nil { + in, out := &in.RollupEnabled, &out.RollupEnabled + *out = new(bool) + **out = **in + } + if in.RollupSearchBackoffCount != nil { + in, out := &in.RollupSearchBackoffCount, &out.RollupSearchBackoffCount + *out = new(int) + **out = **in + } + if in.RollupSearchBackoffMillis != nil { + in, out := &in.RollupSearchBackoffMillis, &out.RollupSearchBackoffMillis + *out = new(int) + **out = **in + } + if in.RollupSearchSearchAllJobs != nil { + in, out := &in.RollupSearchSearchAllJobs, &out.RollupSearchSearchAllJobs + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexRollup. +func (in *IndexRollup) DeepCopy() *IndexRollup { + if in == nil { + return nil + } + out := new(IndexRollup) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IndexTemplate) DeepCopyInto(out *IndexTemplate) { *out = *in @@ -524,6 +629,11 @@ func (in *OpensearchUserConfig) DeepCopyInto(out *OpensearchUserConfig) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.AzureMigration != nil { + in, out := &in.AzureMigration, &out.AzureMigration + *out = new(AzureMigration) + (*in).DeepCopyInto(*out) + } if in.CustomDomain != nil { in, out := &in.CustomDomain, &out.CustomDomain *out = new(string) @@ -534,6 +644,11 @@ func (in *OpensearchUserConfig) DeepCopyInto(out *OpensearchUserConfig) { *out = new(bool) **out = **in } + if in.GcsMigration != nil { + in, out := &in.GcsMigration, &out.GcsMigration + *out = new(GcsMigration) + (*in).DeepCopyInto(*out) + } if in.IndexPatterns != nil { in, out := &in.IndexPatterns, &out.IndexPatterns *out = make([]*IndexPatterns, len(*in)) @@ -545,6 +660,11 @@ func (in *OpensearchUserConfig) DeepCopyInto(out *OpensearchUserConfig) { } } } + if in.IndexRollup != nil { + in, out := &in.IndexRollup, &out.IndexRollup + *out = new(IndexRollup) + (*in).DeepCopyInto(*out) + } if in.IndexTemplate != nil { in, out := &in.IndexTemplate, &out.IndexTemplate *out = new(IndexTemplate) @@ -616,6 +736,11 @@ func (in *OpensearchUserConfig) DeepCopyInto(out *OpensearchUserConfig) { *out = new(string) **out = **in } + if in.S3Migration != nil { + in, out := &in.S3Migration, &out.S3Migration + *out = new(S3Migration) + (*in).DeepCopyInto(*out) + } if in.Saml != nil { in, out := &in.Saml, &out.Saml *out = new(Saml) @@ -738,6 +863,41 @@ func (in *PublicAccess) DeepCopy() *PublicAccess { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Migration) DeepCopyInto(out *S3Migration) { + *out = *in + if in.ChunkSize != nil { + in, out := &in.ChunkSize, &out.ChunkSize + *out = new(string) + **out = **in + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Migration. 
+func (in *S3Migration) DeepCopy() *S3Migration { + if in == nil { + return nil + } + out := new(S3Migration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Saml) DeepCopyInto(out *Saml) { *out = *in diff --git a/charts/aiven-operator-crds/templates/aiven.io_grafanas.yaml b/charts/aiven-operator-crds/templates/aiven.io_grafanas.yaml index af1ed8bc..091b7727 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_grafanas.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_grafanas.yaml @@ -825,6 +825,11 @@ spec: Users with view-only permission can edit but not save dashboards type: boolean + wal: + description: + Setting to enable/disable Write-Ahead Logging. The + default value is false (disabled). + type: boolean type: object required: - plan diff --git a/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml b/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml index cd8b3c25..3782a8a2 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_kafkas.yaml @@ -620,7 +620,7 @@ spec: description: "Enable verification that checks that the partition has been added to the transaction before writing transactional - records to the partition. (Default: false)" + records to the partition. (Default: true)" type: boolean transaction_remove_expired_transaction_cleanup_interval_ms: description: @@ -977,6 +977,19 @@ spec: minimum: 10 type: integer type: object + kafka_sasl_mechanisms: + description: Kafka SASL mechanisms + properties: + plain: + description: Enable PLAIN mechanism + type: boolean + scram_sha_256: + description: Enable SCRAM-SHA-256 mechanism + type: boolean + scram_sha_512: + description: Enable SCRAM-SHA-512 mechanism + type: boolean + type: object kafka_version: description: Kafka major version enum: diff --git a/charts/aiven-operator-crds/templates/aiven.io_opensearches.yaml b/charts/aiven-operator-crds/templates/aiven.io_opensearches.yaml index 6be0fa44..3b23172b 100644 --- a/charts/aiven-operator-crds/templates/aiven.io_opensearches.yaml +++ b/charts/aiven-operator-crds/templates/aiven.io_opensearches.yaml @@ -230,6 +230,61 @@ spec: type: string maxItems: 1 type: array + azure_migration: + properties: + account: + description: Azure account name + pattern: ^[^\r\n]*$ + type: string + base_path: + description: + The path to the repository data within its container. + The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + container: + description: Azure container name + pattern: ^[^\r\n]*$ + type: string + endpoint_suffix: + description: Defines the DNS suffix for Azure Storage endpoints. + pattern: ^[^\r\n]*$ + type: string + key: + description: + Azure account secret key. One of key or sas_token + should be specified + pattern: ^[^\r\n]*$ + type: string + sas_token: + description: + A shared access signatures (SAS) token. 
One of + key or sas_token should be specified + pattern: ^[^\r\n]*$ + type: string + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - account + - base_path + - container + - snapshot_name + type: object custom_domain: description: Serve the web frontend using a custom CNAME pointing @@ -244,6 +299,45 @@ spec: to potential data loss in case of losing a service node, this setting can no longer be activated." type: boolean + gcs_migration: + properties: + base_path: + description: + The path to the repository data within its container. + The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + bucket: + description: The path to the repository data within its container + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + credentials: + description: Google Cloud Storage credentials file content + pattern: ^[^\r\n]*$ + type: string + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - base_path + - bucket + - credentials + - snapshot_name + type: object index_patterns: description: Index patterns items: @@ -279,6 +373,39 @@ spec: type: object maxItems: 512 type: array + index_rollup: + description: Index rollup settings + properties: + rollup_dashboards_enabled: + description: + Whether rollups are enabled in OpenSearch Dashboards. + Defaults to true. + type: boolean + rollup_enabled: + description: + Whether the rollup plugin is enabled. Defaults + to true. + type: boolean + rollup_search_backoff_count: + description: + How many retries the plugin should attempt for + failed rollup jobs. Defaults to 5. + minimum: 1 + type: integer + rollup_search_backoff_millis: + description: + The backoff time between retries for failed rollup + jobs. Defaults to 1000ms. + minimum: 1 + type: integer + rollup_search_search_all_jobs: + description: + Whether OpenSearch should return all jobs that + match all specified search terms. If disabled, OpenSearch + returns just one, as opposed to all, of the jobs that matches + the search terms. Defaults to false. + type: boolean + type: object index_template: description: Template settings for all new indexes properties: @@ -361,6 +488,7 @@ spec: The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings. maxLength: 2048 + pattern: ^[^\r\n]*$ type: string enabled: description: @@ -744,6 +872,7 @@ spec: number of inline script compilations within a period of time. Default is use-context maxLength: 1024 + pattern: ^[^\r\n]*$ type: string search_max_buckets: description: @@ -951,6 +1080,67 @@ spec: maxLength: 128 pattern: ^[a-zA-Z0-9-_:.]+$ type: string + s3_migration: + properties: + access_key: + description: AWS Access key + pattern: ^[^\r\n]*$ + type: string + base_path: + description: + The path to the repository data within its container. + The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + bucket: + description: S3 bucket name + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. 
Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + endpoint: + description: + The S3 service endpoint to connect to. If you + are using an S3-compatible service then you should set this + to the service’s endpoint + pattern: ^[^\r\n]*$ + type: string + region: + description: S3 region + pattern: ^[^\r\n]*$ + type: string + secret_key: + description: AWS secret key + pattern: ^[^\r\n]*$ + type: string + server_side_encryption: + description: + When set to true files are encrypted on server + side + type: boolean + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - access_key + - base_path + - bucket + - region + - secret_key + - snapshot_name + type: object saml: description: OpenSearch SAML configuration properties: @@ -976,6 +1166,7 @@ spec: with the IdP. maxLength: 2048 minLength: 1 + pattern: ^[^\r\n]*$ type: string idp_pemtrustedcas_content: description: diff --git a/config/crd/bases/aiven.io_grafanas.yaml b/config/crd/bases/aiven.io_grafanas.yaml index af1ed8bc..091b7727 100644 --- a/config/crd/bases/aiven.io_grafanas.yaml +++ b/config/crd/bases/aiven.io_grafanas.yaml @@ -825,6 +825,11 @@ spec: Users with view-only permission can edit but not save dashboards type: boolean + wal: + description: + Setting to enable/disable Write-Ahead Logging. The + default value is false (disabled). + type: boolean type: object required: - plan diff --git a/config/crd/bases/aiven.io_kafkas.yaml b/config/crd/bases/aiven.io_kafkas.yaml index cd8b3c25..3782a8a2 100644 --- a/config/crd/bases/aiven.io_kafkas.yaml +++ b/config/crd/bases/aiven.io_kafkas.yaml @@ -620,7 +620,7 @@ spec: description: "Enable verification that checks that the partition has been added to the transaction before writing transactional - records to the partition. (Default: false)" + records to the partition. (Default: true)" type: boolean transaction_remove_expired_transaction_cleanup_interval_ms: description: @@ -977,6 +977,19 @@ spec: minimum: 10 type: integer type: object + kafka_sasl_mechanisms: + description: Kafka SASL mechanisms + properties: + plain: + description: Enable PLAIN mechanism + type: boolean + scram_sha_256: + description: Enable SCRAM-SHA-256 mechanism + type: boolean + scram_sha_512: + description: Enable SCRAM-SHA-512 mechanism + type: boolean + type: object kafka_version: description: Kafka major version enum: diff --git a/config/crd/bases/aiven.io_opensearches.yaml b/config/crd/bases/aiven.io_opensearches.yaml index 6be0fa44..3b23172b 100644 --- a/config/crd/bases/aiven.io_opensearches.yaml +++ b/config/crd/bases/aiven.io_opensearches.yaml @@ -230,6 +230,61 @@ spec: type: string maxItems: 1 type: array + azure_migration: + properties: + account: + description: Azure account name + pattern: ^[^\r\n]*$ + type: string + base_path: + description: + The path to the repository data within its container. + The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. 
Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + container: + description: Azure container name + pattern: ^[^\r\n]*$ + type: string + endpoint_suffix: + description: Defines the DNS suffix for Azure Storage endpoints. + pattern: ^[^\r\n]*$ + type: string + key: + description: + Azure account secret key. One of key or sas_token + should be specified + pattern: ^[^\r\n]*$ + type: string + sas_token: + description: + A shared access signatures (SAS) token. One of + key or sas_token should be specified + pattern: ^[^\r\n]*$ + type: string + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - account + - base_path + - container + - snapshot_name + type: object custom_domain: description: Serve the web frontend using a custom CNAME pointing @@ -244,6 +299,45 @@ spec: to potential data loss in case of losing a service node, this setting can no longer be activated." type: boolean + gcs_migration: + properties: + base_path: + description: + The path to the repository data within its container. + The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + bucket: + description: The path to the repository data within its container + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + credentials: + description: Google Cloud Storage credentials file content + pattern: ^[^\r\n]*$ + type: string + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - base_path + - bucket + - credentials + - snapshot_name + type: object index_patterns: description: Index patterns items: @@ -279,6 +373,39 @@ spec: type: object maxItems: 512 type: array + index_rollup: + description: Index rollup settings + properties: + rollup_dashboards_enabled: + description: + Whether rollups are enabled in OpenSearch Dashboards. + Defaults to true. + type: boolean + rollup_enabled: + description: + Whether the rollup plugin is enabled. Defaults + to true. + type: boolean + rollup_search_backoff_count: + description: + How many retries the plugin should attempt for + failed rollup jobs. Defaults to 5. + minimum: 1 + type: integer + rollup_search_backoff_millis: + description: + The backoff time between retries for failed rollup + jobs. Defaults to 1000ms. + minimum: 1 + type: integer + rollup_search_search_all_jobs: + description: + Whether OpenSearch should return all jobs that + match all specified search terms. If disabled, OpenSearch + returns just one, as opposed to all, of the jobs that matches + the search terms. Defaults to false. + type: boolean + type: object index_template: description: Template settings for all new indexes properties: @@ -361,6 +488,7 @@ spec: The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings. maxLength: 2048 + pattern: ^[^\r\n]*$ type: string enabled: description: @@ -744,6 +872,7 @@ spec: number of inline script compilations within a period of time. 
Default is use-context maxLength: 1024 + pattern: ^[^\r\n]*$ type: string search_max_buckets: description: @@ -951,6 +1080,67 @@ spec: maxLength: 128 pattern: ^[a-zA-Z0-9-_:.]+$ type: string + s3_migration: + properties: + access_key: + description: AWS Access key + pattern: ^[^\r\n]*$ + type: string + base_path: + description: + The path to the repository data within its container. + The value of this setting should not start or end with a + / + pattern: ^[^\r\n]*$ + type: string + bucket: + description: S3 bucket name + pattern: ^[^\r\n]*$ + type: string + chunk_size: + description: + Big files can be broken down into chunks during + snapshotting if needed. Should be the same as for the 3rd + party repository + pattern: ^[^\r\n]*$ + type: string + compress: + description: + when set to true metadata files are stored in + compressed format + type: boolean + endpoint: + description: + The S3 service endpoint to connect to. If you + are using an S3-compatible service then you should set this + to the service’s endpoint + pattern: ^[^\r\n]*$ + type: string + region: + description: S3 region + pattern: ^[^\r\n]*$ + type: string + secret_key: + description: AWS secret key + pattern: ^[^\r\n]*$ + type: string + server_side_encryption: + description: + When set to true files are encrypted on server + side + type: boolean + snapshot_name: + description: The snapshot name to restore from + pattern: ^[^\r\n]*$ + type: string + required: + - access_key + - base_path + - bucket + - region + - secret_key + - snapshot_name + type: object saml: description: OpenSearch SAML configuration properties: @@ -976,6 +1166,7 @@ spec: with the IdP. maxLength: 2048 minLength: 1 + pattern: ^[^\r\n]*$ type: string idp_pemtrustedcas_content: description: diff --git a/docs/docs/api-reference/grafana.md b/docs/docs/api-reference/grafana.md index 1d59d4d3..935b4e8a 100644 --- a/docs/docs/api-reference/grafana.md +++ b/docs/docs/api-reference/grafana.md @@ -241,6 +241,7 @@ Cassandra specific user configuration options. - [`user_auto_assign_org`](#spec.userConfig.user_auto_assign_org-property){: name='spec.userConfig.user_auto_assign_org-property'} (boolean). Auto-assign new users on signup to main organization. Defaults to false. - [`user_auto_assign_org_role`](#spec.userConfig.user_auto_assign_org_role-property){: name='spec.userConfig.user_auto_assign_org_role-property'} (string, Enum: `Viewer`, `Admin`, `Editor`). Set role for new signups. Defaults to Viewer. - [`viewers_can_edit`](#spec.userConfig.viewers_can_edit-property){: name='spec.userConfig.viewers_can_edit-property'} (boolean). Users with view-only permission can edit but not save dashboards. +- [`wal`](#spec.userConfig.wal-property){: name='spec.userConfig.wal-property'} (boolean). Setting to enable/disable Write-Ahead Logging. The default value is false (disabled). ### auth_azuread {: #spec.userConfig.auth_azuread } diff --git a/docs/docs/api-reference/kafka.md b/docs/docs/api-reference/kafka.md index 583f9fcc..e7620f51 100644 --- a/docs/docs/api-reference/kafka.md +++ b/docs/docs/api-reference/kafka.md @@ -216,6 +216,7 @@ Kafka specific user configuration options. - [`kafka_rest`](#spec.userConfig.kafka_rest-property){: name='spec.userConfig.kafka_rest-property'} (boolean). Enable Kafka-REST service. - [`kafka_rest_authorization`](#spec.userConfig.kafka_rest_authorization-property){: name='spec.userConfig.kafka_rest_authorization-property'} (boolean). Enable authorization in Kafka-REST service. 
- [`kafka_rest_config`](#spec.userConfig.kafka_rest_config-property){: name='spec.userConfig.kafka_rest_config-property'} (object). Kafka REST configuration. See below for [nested schema](#spec.userConfig.kafka_rest_config). +- [`kafka_sasl_mechanisms`](#spec.userConfig.kafka_sasl_mechanisms-property){: name='spec.userConfig.kafka_sasl_mechanisms-property'} (object). Kafka SASL mechanisms. See below for [nested schema](#spec.userConfig.kafka_sasl_mechanisms). - [`kafka_version`](#spec.userConfig.kafka_version-property){: name='spec.userConfig.kafka_version-property'} (string, Enum: `3.4`, `3.5`, `3.6`, `3.7`). Kafka major version. - [`letsencrypt_sasl_privatelink`](#spec.userConfig.letsencrypt_sasl_privatelink-property){: name='spec.userConfig.letsencrypt_sasl_privatelink-property'} (boolean). Use Letsencrypt CA for Kafka SASL via Privatelink. - [`private_access`](#spec.userConfig.private_access-property){: name='spec.userConfig.private_access-property'} (object). Allow access to selected service ports from private networks. See below for [nested schema](#spec.userConfig.private_access). @@ -302,7 +303,7 @@ Kafka broker configuration values. - [`sasl_oauthbearer_jwks_endpoint_url`](#spec.userConfig.kafka.sasl_oauthbearer_jwks_endpoint_url-property){: name='spec.userConfig.kafka.sasl_oauthbearer_jwks_endpoint_url-property'} (string, MaxLength: 2048). OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null). - [`sasl_oauthbearer_sub_claim_name`](#spec.userConfig.kafka.sasl_oauthbearer_sub_claim_name-property){: name='spec.userConfig.kafka.sasl_oauthbearer_sub_claim_name-property'} (string, Pattern: `^[^\r\n]*\S[^\r\n]*$`, MaxLength: 128). Name of the scope from which to extract the subject claim from the JWT.(Default: sub). - [`socket_request_max_bytes`](#spec.userConfig.kafka.socket_request_max_bytes-property){: name='spec.userConfig.kafka.socket_request_max_bytes-property'} (integer, Minimum: 10485760, Maximum: 209715200). The maximum number of bytes in a socket request (Default: 104857600 bytes). -- [`transaction_partition_verification_enable`](#spec.userConfig.kafka.transaction_partition_verification_enable-property){: name='spec.userConfig.kafka.transaction_partition_verification_enable-property'} (boolean). Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false). +- [`transaction_partition_verification_enable`](#spec.userConfig.kafka.transaction_partition_verification_enable-property){: name='spec.userConfig.kafka.transaction_partition_verification_enable-property'} (boolean). Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true). - [`transaction_remove_expired_transaction_cleanup_interval_ms`](#spec.userConfig.kafka.transaction_remove_expired_transaction_cleanup_interval_ms-property){: name='spec.userConfig.kafka.transaction_remove_expired_transaction_cleanup_interval_ms-property'} (integer, Minimum: 600000, Maximum: 3600000). The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). 
- [`transaction_state_log_segment_bytes`](#spec.userConfig.kafka.transaction_state_log_segment_bytes-property){: name='spec.userConfig.kafka.transaction_state_log_segment_bytes-property'} (integer, Minimum: 1048576, Maximum: 2147483647). The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). @@ -408,6 +409,18 @@ Kafka REST configuration. - [`producer_max_request_size`](#spec.userConfig.kafka_rest_config.producer_max_request_size-property){: name='spec.userConfig.kafka_rest_config.producer_max_request_size-property'} (integer, Minimum: 0, Maximum: 2147483647). The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. - [`simpleconsumer_pool_size_max`](#spec.userConfig.kafka_rest_config.simpleconsumer_pool_size_max-property){: name='spec.userConfig.kafka_rest_config.simpleconsumer_pool_size_max-property'} (integer, Minimum: 10, Maximum: 250). Maximum number of SimpleConsumers that can be instantiated per broker. +### kafka_sasl_mechanisms {: #spec.userConfig.kafka_sasl_mechanisms } + +_Appears on [`spec.userConfig`](#spec.userConfig)._ + +Kafka SASL mechanisms. + +**Optional** + +- [`plain`](#spec.userConfig.kafka_sasl_mechanisms.plain-property){: name='spec.userConfig.kafka_sasl_mechanisms.plain-property'} (boolean). Enable PLAIN mechanism. +- [`scram_sha_256`](#spec.userConfig.kafka_sasl_mechanisms.scram_sha_256-property){: name='spec.userConfig.kafka_sasl_mechanisms.scram_sha_256-property'} (boolean). Enable SCRAM-SHA-256 mechanism. +- [`scram_sha_512`](#spec.userConfig.kafka_sasl_mechanisms.scram_sha_512-property){: name='spec.userConfig.kafka_sasl_mechanisms.scram_sha_512-property'} (boolean). Enable SCRAM-SHA-512 mechanism. + ### private_access {: #spec.userConfig.private_access } _Appears on [`spec.userConfig`](#spec.userConfig)._ diff --git a/docs/docs/api-reference/opensearch.md b/docs/docs/api-reference/opensearch.md index bcf40936..159cfa3e 100644 --- a/docs/docs/api-reference/opensearch.md +++ b/docs/docs/api-reference/opensearch.md @@ -193,9 +193,12 @@ OpenSearch specific user configuration options. **Optional** - [`additional_backup_regions`](#spec.userConfig.additional_backup_regions-property){: name='spec.userConfig.additional_backup_regions-property'} (array of strings, MaxItems: 1). Additional Cloud Regions for Backup Replication. +- [`azure_migration`](#spec.userConfig.azure_migration-property){: name='spec.userConfig.azure_migration-property'} (object). See below for [nested schema](#spec.userConfig.azure_migration). - [`custom_domain`](#spec.userConfig.custom_domain-property){: name='spec.userConfig.custom_domain-property'} (string, MaxLength: 255). Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. - [`disable_replication_factor_adjustment`](#spec.userConfig.disable_replication_factor_adjustment-property){: name='spec.userConfig.disable_replication_factor_adjustment-property'} (boolean). DEPRECATED: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated. +- [`gcs_migration`](#spec.userConfig.gcs_migration-property){: name='spec.userConfig.gcs_migration-property'} (object). See below for [nested schema](#spec.userConfig.gcs_migration). 
- [`index_patterns`](#spec.userConfig.index_patterns-property){: name='spec.userConfig.index_patterns-property'} (array of objects, MaxItems: 512). Index patterns. See below for [nested schema](#spec.userConfig.index_patterns). +- [`index_rollup`](#spec.userConfig.index_rollup-property){: name='spec.userConfig.index_rollup-property'} (object). Index rollup settings. See below for [nested schema](#spec.userConfig.index_rollup). - [`index_template`](#spec.userConfig.index_template-property){: name='spec.userConfig.index_template-property'} (object). Template settings for all new indexes. See below for [nested schema](#spec.userConfig.index_template). - [`ip_filter`](#spec.userConfig.ip_filter-property){: name='spec.userConfig.ip_filter-property'} (array of objects, MaxItems: 1024). Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`. See below for [nested schema](#spec.userConfig.ip_filter). - [`keep_index_refresh_interval`](#spec.userConfig.keep_index_refresh_interval-property){: name='spec.userConfig.keep_index_refresh_interval-property'} (boolean). Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true. @@ -209,11 +212,47 @@ OpenSearch specific user configuration options. - [`project_to_fork_from`](#spec.userConfig.project_to_fork_from-property){: name='spec.userConfig.project_to_fork_from-property'} (string, Immutable, Pattern: `^[a-z][-a-z0-9]{0,63}$|^$`, MaxLength: 63). Name of another project to fork a service from. This has effect only when a new service is being created. - [`public_access`](#spec.userConfig.public_access-property){: name='spec.userConfig.public_access-property'} (object). Allow access to selected service ports from the public Internet. See below for [nested schema](#spec.userConfig.public_access). - [`recovery_basebackup_name`](#spec.userConfig.recovery_basebackup_name-property){: name='spec.userConfig.recovery_basebackup_name-property'} (string, Pattern: `^[a-zA-Z0-9-_:.]+$`, MaxLength: 128). Name of the basebackup to restore in forked service. +- [`s3_migration`](#spec.userConfig.s3_migration-property){: name='spec.userConfig.s3_migration-property'} (object). See below for [nested schema](#spec.userConfig.s3_migration). - [`saml`](#spec.userConfig.saml-property){: name='spec.userConfig.saml-property'} (object). OpenSearch SAML configuration. See below for [nested schema](#spec.userConfig.saml). - [`service_log`](#spec.userConfig.service_log-property){: name='spec.userConfig.service_log-property'} (boolean). Store logs for the service so that they are available in the HTTP API and console. - [`service_to_fork_from`](#spec.userConfig.service_to_fork_from-property){: name='spec.userConfig.service_to_fork_from-property'} (string, Immutable, Pattern: `^[a-z][-a-z0-9]{0,63}$|^$`, MaxLength: 64). Name of another service to fork from. This has effect only when a new service is being created. - [`static_ips`](#spec.userConfig.static_ips-property){: name='spec.userConfig.static_ips-property'} (boolean). Use static public IP addresses. +### azure_migration {: #spec.userConfig.azure_migration } + +_Appears on [`spec.userConfig`](#spec.userConfig)._ + +**Required** + +- [`account`](#spec.userConfig.azure_migration.account-property){: name='spec.userConfig.azure_migration.account-property'} (string, Pattern: `^[^\r\n]*$`). Azure account name. 
+- [`base_path`](#spec.userConfig.azure_migration.base_path-property){: name='spec.userConfig.azure_migration.base_path-property'} (string, Pattern: `^[^\r\n]*$`). The path to the repository data within its container. The value of this setting should not start or end with a /. +- [`container`](#spec.userConfig.azure_migration.container-property){: name='spec.userConfig.azure_migration.container-property'} (string, Pattern: `^[^\r\n]*$`). Azure container name. +- [`snapshot_name`](#spec.userConfig.azure_migration.snapshot_name-property){: name='spec.userConfig.azure_migration.snapshot_name-property'} (string, Pattern: `^[^\r\n]*$`). The snapshot name to restore from. + +**Optional** + +- [`chunk_size`](#spec.userConfig.azure_migration.chunk_size-property){: name='spec.userConfig.azure_migration.chunk_size-property'} (string, Pattern: `^[^\r\n]*$`). Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository. +- [`compress`](#spec.userConfig.azure_migration.compress-property){: name='spec.userConfig.azure_migration.compress-property'} (boolean). when set to true metadata files are stored in compressed format. +- [`endpoint_suffix`](#spec.userConfig.azure_migration.endpoint_suffix-property){: name='spec.userConfig.azure_migration.endpoint_suffix-property'} (string, Pattern: `^[^\r\n]*$`). Defines the DNS suffix for Azure Storage endpoints. +- [`key`](#spec.userConfig.azure_migration.key-property){: name='spec.userConfig.azure_migration.key-property'} (string, Pattern: `^[^\r\n]*$`). Azure account secret key. One of key or sas_token should be specified. +- [`sas_token`](#spec.userConfig.azure_migration.sas_token-property){: name='spec.userConfig.azure_migration.sas_token-property'} (string, Pattern: `^[^\r\n]*$`). A shared access signatures (SAS) token. One of key or sas_token should be specified. + +### gcs_migration {: #spec.userConfig.gcs_migration } + +_Appears on [`spec.userConfig`](#spec.userConfig)._ + +**Required** + +- [`base_path`](#spec.userConfig.gcs_migration.base_path-property){: name='spec.userConfig.gcs_migration.base_path-property'} (string, Pattern: `^[^\r\n]*$`). The path to the repository data within its container. The value of this setting should not start or end with a /. +- [`bucket`](#spec.userConfig.gcs_migration.bucket-property){: name='spec.userConfig.gcs_migration.bucket-property'} (string, Pattern: `^[^\r\n]*$`). The path to the repository data within its container. +- [`credentials`](#spec.userConfig.gcs_migration.credentials-property){: name='spec.userConfig.gcs_migration.credentials-property'} (string, Pattern: `^[^\r\n]*$`). Google Cloud Storage credentials file content. +- [`snapshot_name`](#spec.userConfig.gcs_migration.snapshot_name-property){: name='spec.userConfig.gcs_migration.snapshot_name-property'} (string, Pattern: `^[^\r\n]*$`). The snapshot name to restore from. + +**Optional** + +- [`chunk_size`](#spec.userConfig.gcs_migration.chunk_size-property){: name='spec.userConfig.gcs_migration.chunk_size-property'} (string, Pattern: `^[^\r\n]*$`). Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository. +- [`compress`](#spec.userConfig.gcs_migration.compress-property){: name='spec.userConfig.gcs_migration.compress-property'} (boolean). when set to true metadata files are stored in compressed format. 
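A hedged sketch of the `gcs_migration` block documented above, applied to an `OpenSearch` resource. Only the field names and their constraints come from the generated schema; every value shown is a placeholder, and other spec fields a real manifest would need are omitted:

```yaml
apiVersion: aiven.io/v1alpha1
kind: OpenSearch
metadata:
  name: example-opensearch            # placeholder name
spec:
  project: example-project            # placeholder Aiven project
  plan: business-4                    # placeholder service plan
  userConfig:
    gcs_migration:
      bucket: example-snapshot-bucket   # placeholder bucket name
      base_path: snapshots/opensearch   # must not start or end with a /
      snapshot_name: example-snapshot   # placeholder snapshot to restore from
      compress: true                    # store metadata files in compressed format
      credentials: |                    # placeholder service account key content
        { "type": "service_account" }
```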
+ ### index_patterns {: #spec.userConfig.index_patterns } _Appears on [`spec.userConfig`](#spec.userConfig)._ @@ -229,6 +268,20 @@ Allows you to create glob style patterns and set a max number of indexes matchin - [`sorting_algorithm`](#spec.userConfig.index_patterns.sorting_algorithm-property){: name='spec.userConfig.index_patterns.sorting_algorithm-property'} (string, Enum: `alphabetical`, `creation_date`). Deletion sorting algorithm. +### index_rollup {: #spec.userConfig.index_rollup } + +_Appears on [`spec.userConfig`](#spec.userConfig)._ + +Index rollup settings. + +**Optional** + +- [`rollup_dashboards_enabled`](#spec.userConfig.index_rollup.rollup_dashboards_enabled-property){: name='spec.userConfig.index_rollup.rollup_dashboards_enabled-property'} (boolean). Whether rollups are enabled in OpenSearch Dashboards. Defaults to true. +- [`rollup_enabled`](#spec.userConfig.index_rollup.rollup_enabled-property){: name='spec.userConfig.index_rollup.rollup_enabled-property'} (boolean). Whether the rollup plugin is enabled. Defaults to true. +- [`rollup_search_backoff_count`](#spec.userConfig.index_rollup.rollup_search_backoff_count-property){: name='spec.userConfig.index_rollup.rollup_search_backoff_count-property'} (integer, Minimum: 1). How many retries the plugin should attempt for failed rollup jobs. Defaults to 5. +- [`rollup_search_backoff_millis`](#spec.userConfig.index_rollup.rollup_search_backoff_millis-property){: name='spec.userConfig.index_rollup.rollup_search_backoff_millis-property'} (integer, Minimum: 1). The backoff time between retries for failed rollup jobs. Defaults to 1000ms. +- [`rollup_search_search_all_jobs`](#spec.userConfig.index_rollup.rollup_search_search_all_jobs-property){: name='spec.userConfig.index_rollup.rollup_search_search_all_jobs-property'} (boolean). Whether OpenSearch should return all jobs that match all specified search terms. If disabled, OpenSearch returns just one, as opposed to all, of the jobs that matches the search terms. Defaults to false. + ### index_template {: #spec.userConfig.index_template } _Appears on [`spec.userConfig`](#spec.userConfig)._ @@ -265,7 +318,7 @@ OpenSearch OpenID Connect Configuration. - [`client_id`](#spec.userConfig.openid.client_id-property){: name='spec.userConfig.openid.client_id-property'} (string, Pattern: `^[^\r\n]*$`, MinLength: 1, MaxLength: 1024). The ID of the OpenID Connect client configured in your IdP. Required. - [`client_secret`](#spec.userConfig.openid.client_secret-property){: name='spec.userConfig.openid.client_secret-property'} (string, Pattern: `^[^\r\n]*$`, MinLength: 1, MaxLength: 1024). The client secret of the OpenID Connect client configured in your IdP. Required. -- [`connect_url`](#spec.userConfig.openid.connect_url-property){: name='spec.userConfig.openid.connect_url-property'} (string, MaxLength: 2048). The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings. +- [`connect_url`](#spec.userConfig.openid.connect_url-property){: name='spec.userConfig.openid.connect_url-property'} (string, Pattern: `^[^\r\n]*$`, MaxLength: 2048). The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings. - [`enabled`](#spec.userConfig.openid.enabled-property){: name='spec.userConfig.openid.enabled-property'} (boolean). Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. 
**Optional** @@ -318,7 +371,7 @@ OpenSearch settings. - [`override_main_response_version`](#spec.userConfig.opensearch.override_main_response_version-property){: name='spec.userConfig.opensearch.override_main_response_version-property'} (boolean). Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false. - [`plugins_alerting_filter_by_backend_roles`](#spec.userConfig.opensearch.plugins_alerting_filter_by_backend_roles-property){: name='spec.userConfig.opensearch.plugins_alerting_filter_by_backend_roles-property'} (boolean). Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false. - [`reindex_remote_whitelist`](#spec.userConfig.opensearch.reindex_remote_whitelist-property){: name='spec.userConfig.opensearch.reindex_remote_whitelist-property'} (array of strings, MaxItems: 32). Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart. -- [`script_max_compilations_rate`](#spec.userConfig.opensearch.script_max_compilations_rate-property){: name='spec.userConfig.opensearch.script_max_compilations_rate-property'} (string, MaxLength: 1024). Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context. +- [`script_max_compilations_rate`](#spec.userConfig.opensearch.script_max_compilations_rate-property){: name='spec.userConfig.opensearch.script_max_compilations_rate-property'} (string, Pattern: `^[^\r\n]*$`, MaxLength: 1024). Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context. - [`search_max_buckets`](#spec.userConfig.opensearch.search_max_buckets-property){: name='spec.userConfig.opensearch.search_max_buckets-property'} (integer, Minimum: 1, Maximum: 1000000). Maximum number of aggregation buckets allowed in a single response. OpenSearch default value is used when this is not defined. - [`thread_pool_analyze_queue_size`](#spec.userConfig.opensearch.thread_pool_analyze_queue_size-property){: name='spec.userConfig.opensearch.thread_pool_analyze_queue_size-property'} (integer, Minimum: 10, Maximum: 2000). Size for the thread pool queue. See documentation for exact details. - [`thread_pool_analyze_size`](#spec.userConfig.opensearch.thread_pool_analyze_size-property){: name='spec.userConfig.opensearch.thread_pool_analyze_size-property'} (integer, Minimum: 1, Maximum: 128). Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value. @@ -420,6 +473,26 @@ Allow access to selected service ports from the public Internet. - [`opensearch_dashboards`](#spec.userConfig.public_access.opensearch_dashboards-property){: name='spec.userConfig.public_access.opensearch_dashboards-property'} (boolean). Allow clients to connect to opensearch_dashboards from the public internet for service nodes that are in a project VPC or another type of private network. - [`prometheus`](#spec.userConfig.public_access.prometheus-property){: name='spec.userConfig.public_access.prometheus-property'} (boolean). Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network. 
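+
+For illustration, the `index_rollup` and `public_access` settings described above might be combined in a `userConfig` fragment like the following sketch; the values are examples only, not recommendations.
+
+```yaml
+spec:
+  userConfig:
+    index_rollup:
+      rollup_enabled: true                  # rollup plugin on (defaults to true)
+      rollup_dashboards_enabled: true       # rollups visible in OpenSearch Dashboards
+      rollup_search_backoff_count: 5        # retries for failed rollup jobs
+      rollup_search_backoff_millis: 1000    # backoff between retries, in milliseconds
+      rollup_search_search_all_jobs: false  # return only one matching job per search
+    public_access:
+      opensearch_dashboards: false          # keep Dashboards off the public internet
+      prometheus: false                     # keep Prometheus metrics private
+```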
+### s3_migration {: #spec.userConfig.s3_migration }
+
+_Appears on [`spec.userConfig`](#spec.userConfig)._
+
+**Required**
+
+- [`access_key`](#spec.userConfig.s3_migration.access_key-property){: name='spec.userConfig.s3_migration.access_key-property'} (string, Pattern: `^[^\r\n]*$`). AWS access key.
+- [`base_path`](#spec.userConfig.s3_migration.base_path-property){: name='spec.userConfig.s3_migration.base_path-property'} (string, Pattern: `^[^\r\n]*$`). The path to the repository data within its container. The value of this setting should not start or end with a /.
+- [`bucket`](#spec.userConfig.s3_migration.bucket-property){: name='spec.userConfig.s3_migration.bucket-property'} (string, Pattern: `^[^\r\n]*$`). S3 bucket name.
+- [`region`](#spec.userConfig.s3_migration.region-property){: name='spec.userConfig.s3_migration.region-property'} (string, Pattern: `^[^\r\n]*$`). S3 region.
+- [`secret_key`](#spec.userConfig.s3_migration.secret_key-property){: name='spec.userConfig.s3_migration.secret_key-property'} (string, Pattern: `^[^\r\n]*$`). AWS secret key.
+- [`snapshot_name`](#spec.userConfig.s3_migration.snapshot_name-property){: name='spec.userConfig.s3_migration.snapshot_name-property'} (string, Pattern: `^[^\r\n]*$`). The snapshot name to restore from.
+
+**Optional**
+
+- [`chunk_size`](#spec.userConfig.s3_migration.chunk_size-property){: name='spec.userConfig.s3_migration.chunk_size-property'} (string, Pattern: `^[^\r\n]*$`). Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+- [`compress`](#spec.userConfig.s3_migration.compress-property){: name='spec.userConfig.s3_migration.compress-property'} (boolean). When set to true, metadata files are stored in compressed format.
+- [`endpoint`](#spec.userConfig.s3_migration.endpoint-property){: name='spec.userConfig.s3_migration.endpoint-property'} (string, Pattern: `^[^\r\n]*$`). The S3 service endpoint to connect to. If you are using an S3-compatible service, then you should set this to the service’s endpoint.
+- [`server_side_encryption`](#spec.userConfig.s3_migration.server_side_encryption-property){: name='spec.userConfig.s3_migration.server_side_encryption-property'} (boolean). When set to true, files are encrypted on the server side.
+
 ### saml {: #spec.userConfig.saml }
 
 _Appears on [`spec.userConfig`](#spec.userConfig)._
 
@@ -430,7 +503,7 @@ OpenSearch SAML configuration.
 
 - [`enabled`](#spec.userConfig.saml.enabled-property){: name='spec.userConfig.saml.enabled-property'} (boolean). Enables or disables SAML-based authentication for OpenSearch. When enabled, users can authenticate using SAML with an Identity Provider.
 - [`idp_entity_id`](#spec.userConfig.saml.idp_entity_id-property){: name='spec.userConfig.saml.idp_entity_id-property'} (string, Pattern: `^[^\r\n]*$`, MinLength: 1, MaxLength: 1024). The unique identifier for the Identity Provider (IdP) entity that is used for SAML authentication. This value is typically provided by the IdP.
-- [`idp_metadata_url`](#spec.userConfig.saml.idp_metadata_url-property){: name='spec.userConfig.saml.idp_metadata_url-property'} (string, MinLength: 1, MaxLength: 2048). The URL of the SAML metadata for the Identity Provider (IdP). This is used to configure SAML-based authentication with the IdP.
+- [`idp_metadata_url`](#spec.userConfig.saml.idp_metadata_url-property){: name='spec.userConfig.saml.idp_metadata_url-property'} (string, Pattern: `^[^\r\n]*$`, MinLength: 1, MaxLength: 2048). The URL of the SAML metadata for the Identity Provider (IdP). This is used to configure SAML-based authentication with the IdP.
 - [`sp_entity_id`](#spec.userConfig.saml.sp_entity_id-property){: name='spec.userConfig.saml.sp_entity_id-property'} (string, Pattern: `^[^\r\n]*$`, MinLength: 1, MaxLength: 1024). The unique identifier for the Service Provider (SP) entity that is used for SAML authentication. This value is typically provided by the SP.
 
 **Optional**