diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ccb9df5b..5fd6d4d7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,11 @@ nav_order: 1 ## [MAJOR.MINOR.PATCH] - YYYY-MM-DD - Add support for `autoscaler` service integration +- Add `aiven_opensearch` resource field `include_aliases` (in the `azure_migration`, `gcs_migration`, and `s3_migration` blocks): Whether to restore aliases alongside their associated indexes. Default is true. +- Add `aiven_opensearch` datasource field `include_aliases` (in the `azure_migration`, `gcs_migration`, and `s3_migration` blocks): Whether to restore aliases alongside their associated indexes. Default is true. +- Change `aiven_cassandra` resource field `additional_backup_regions`: remove deprecation +- Change `aiven_cassandra` datasource field `additional_backup_regions`: remove deprecation + ## [4.28.0] - 2024-10-21 diff --git a/docs/data-sources/account_team_project.md b/docs/data-sources/account_team_project.md index 5ff414251..4224a55c4 100644 --- a/docs/data-sources/account_team_project.md +++ b/docs/data-sources/account_team_project.md @@ -32,4 +32,4 @@ data "aiven_account_team_project" "account_team_project1" { ### Read-Only - `id` (String) The ID of this resource. -- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. +- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `services:maintenance`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. diff --git a/docs/data-sources/opensearch.md b/docs/data-sources/opensearch.md index ac40e3f0f..d5b444b3e 100644 --- a/docs/data-sources/opensearch.md +++ b/docs/data-sources/opensearch.md @@ -128,6 +128,7 @@ Read-Only: - `compress` (Boolean) - `container` (String) - `endpoint_suffix` (String) +- `include_aliases` (Boolean) - `indices` (String) - `key` (String) - `restore_global_state` (Boolean) @@ -145,6 +146,7 @@ Read-Only: - `chunk_size` (String) - `compress` (Boolean) - `credentials` (String) +- `include_aliases` (Boolean) - `indices` (String) - `restore_global_state` (Boolean) - `snapshot_name` (String) @@ -449,6 +451,7 @@ Read-Only: - `chunk_size` (String) - `compress` (Boolean) - `endpoint` (String) +- `include_aliases` (Boolean) - `indices` (String) - `region` (String) - `restore_global_state` (Boolean) diff --git a/docs/data-sources/project_user.md b/docs/data-sources/project_user.md index 889026e35..3c7511bae 100644 --- a/docs/data-sources/project_user.md +++ b/docs/data-sources/project_user.md @@ -31,4 +31,4 @@ data "aiven_project_user" "mytestuser" { - `accepted` (Boolean) Whether the user has accepted the request to join the project. 
Users get an invite and become project members after accepting the invite. - `id` (String) The ID of this resource. -- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `project:services:read`, `read_only` and `service:logs:read`. +- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `read_only`, `service:configuration:write`, `service:logs:read` and `services:maintenance`. diff --git a/docs/resources/account_team_project.md b/docs/resources/account_team_project.md index c3c7577f9..6c507dff6 100644 --- a/docs/resources/account_team_project.md +++ b/docs/resources/account_team_project.md @@ -48,7 +48,7 @@ resource "aiven_account_team_project" "main" { ### Optional - `project_name` (String) The name of an already existing project -- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. +- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `services:maintenance`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only diff --git a/docs/resources/cassandra.md b/docs/resources/cassandra.md index 99a58ce6d..ba99e4948 100644 --- a/docs/resources/cassandra.md +++ b/docs/resources/cassandra.md @@ -86,7 +86,7 @@ Optional: Optional: -- `additional_backup_regions` (List of String, Deprecated) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. - `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. Example: `3`. - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. Example: `30`. - `cassandra` (Block List, Max: 1) Cassandra configuration values (see [below for nested schema](#nestedblock--cassandra_user_config--cassandra)) diff --git a/docs/resources/dragonfly.md b/docs/resources/dragonfly.md index 354123b19..04d301612 100644 --- a/docs/resources/dragonfly.md +++ b/docs/resources/dragonfly.md @@ -87,7 +87,7 @@ Read-Only: Optional: - `cache_mode` (Boolean) Evict entries when getting close to maxmemory limit. Default: `false`. -- `dragonfly_persistence` (String) Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. 
Also, the service can't be forked. +- `dragonfly_persistence` (String) Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked. - `dragonfly_ssl` (Boolean) Require SSL to access Dragonfly. Default: `true`. - `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`. - `ip_filter_object` (Block Set, Max: 1024) Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16` (see [below for nested schema](#nestedblock--dragonfly_user_config--ip_filter_object)) diff --git a/docs/resources/grafana.md b/docs/resources/grafana.md index 5d6295dc8..0a611c0e8 100644 --- a/docs/resources/grafana.md +++ b/docs/resources/grafana.md @@ -90,7 +90,7 @@ Optional: - `alerting_enabled` (Boolean) Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled. - `alerting_error_or_timeout` (String) Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules. - `alerting_max_annotations_to_keep` (Number) Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. Example: `0`. -- `alerting_nodata_or_nullvalues` (String) Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules. +- `alerting_nodata_or_nullvalues` (String) Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules. - `allow_embedding` (Boolean) Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking. - `auth_azuread` (Block List, Max: 1) Azure AD OAuth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_azuread)) - `auth_basic_enabled` (Boolean) Enable or disable basic authentication form, used by Grafana built-in login. @@ -98,7 +98,7 @@ Optional: - `auth_github` (Block List, Max: 1) Github Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_github)) - `auth_gitlab` (Block List, Max: 1) GitLab Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_gitlab)) - `auth_google` (Block List, Max: 1) Google Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_google)) -- `cookie_samesite` (String) Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value. +- `cookie_samesite` (String) Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value. - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`. - `dashboard_previews_enabled` (Boolean) This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering. 
- `dashboards_min_refresh_interval` (String) Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h. Example: `5s`. @@ -126,7 +126,7 @@ Optional: - `static_ips` (Boolean) Use static public IP addresses. - `unified_alerting_enabled` (Boolean) Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details. - `user_auto_assign_org` (Boolean) Auto-assign new users on signup to main organization. Defaults to false. -- `user_auto_assign_org_role` (String) Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer. +- `user_auto_assign_org_role` (String) Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer. - `viewers_can_edit` (Boolean) Users with view-only permission can edit but not save dashboards. - `wal` (Boolean) Setting to enable/disable Write-Ahead Logging. The default value is false (disabled). @@ -293,7 +293,7 @@ Optional: - `from_name` (String) Name used in outgoing emails, defaults to Grafana. - `password` (String, Sensitive) Password for SMTP authentication. Example: `ein0eemeev5eeth3Ahfu`. - `skip_verify` (Boolean) Skip verifying server certificate. Defaults to false. -- `starttls_policy` (String) Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS. +- `starttls_policy` (String) Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS. - `username` (String) Username for SMTP authentication. Example: `smtpuser`. diff --git a/docs/resources/kafka.md b/docs/resources/kafka.md index 33b45ca5f..d0718d50f 100644 --- a/docs/resources/kafka.md +++ b/docs/resources/kafka.md @@ -159,7 +159,7 @@ Optional: Optional: - `auto_create_topics_enable` (Boolean) Enable auto-creation of topics. (Default: true). -- `compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer). +- `compression_type` (String) Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer). - `connections_max_idle_ms` (Number) Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`. - `default_replication_factor` (Number) Replication factor for auto-created topics (Default: 3). 
- `group_initial_rebalance_delay_ms` (Number) The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`. @@ -169,7 +169,7 @@ Optional: - `log_cleaner_max_compaction_lag_ms` (Number) The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)). - `log_cleaner_min_cleanable_ratio` (Number) Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`. - `log_cleaner_min_compaction_lag_ms` (Number) The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms). -- `log_cleanup_policy` (String) Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete). +- `log_cleanup_policy` (String) Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete). - `log_flush_interval_messages` (Number) The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`. - `log_flush_interval_ms` (Number) The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null). - `log_index_interval_bytes` (Number) The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`. @@ -220,10 +220,10 @@ Optional: Optional: -- `connector_client_config_override_policy` (String) Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None. +- `connector_client_config_override_policy` (String) Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None. - `consumer_auto_offset_reset` (String) Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest. - `consumer_fetch_max_bytes` (Number) Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`. -- `consumer_isolation_level` (String) Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. +- `consumer_isolation_level` (String) Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. 
- `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`. - `consumer_max_poll_interval_ms` (Number) The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000). - `consumer_max_poll_records` (Number) The maximum number of records returned in a single call to poll() (defaults to 500). @@ -231,7 +231,7 @@ Optional: - `offset_flush_timeout_ms` (Number) Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000). - `producer_batch_size` (Number) This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384). - `producer_buffer_memory` (Number) The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432). -- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. +- `producer_compression_type` (String) Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. - `producer_linger_ms` (Number) This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0. - `producer_max_request_size` (Number) This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`. - `scheduled_rebalance_max_delay_ms` (Number) The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes. @@ -288,10 +288,10 @@ Optional: - `consumer_enable_auto_commit` (Boolean) If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`. - `consumer_request_max_bytes` (Number) Maximum number of bytes in unencoded message keys and values by a single request. Default: `67108864`. - `consumer_request_timeout_ms` (Number) Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`. -- `name_strategy` (String) Enum: `topic_name`, `record_name`, `topic_record_name`. 
Name strategy to use when selecting subject for storing schemas. Default: `topic_name`. +- `name_strategy` (String) Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`. - `name_strategy_validation` (Boolean) If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`. -- `producer_acks` (String) Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`. -- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. +- `producer_acks` (String) Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`. +- `producer_compression_type` (String) Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. - `producer_linger_ms` (Number) Wait for up to the given delay to allow batching records together. Default: `0`. - `producer_max_request_size` (Number) The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: `1048576`. - `simpleconsumer_pool_size_max` (Number) Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`. diff --git a/docs/resources/kafka_connect.md b/docs/resources/kafka_connect.md index c6976029c..f5362fe52 100644 --- a/docs/resources/kafka_connect.md +++ b/docs/resources/kafka_connect.md @@ -142,10 +142,10 @@ Optional: Optional: -- `connector_client_config_override_policy` (String) Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None. +- `connector_client_config_override_policy` (String) Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None. - `consumer_auto_offset_reset` (String) Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest. - `consumer_fetch_max_bytes` (Number) Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`. -- `consumer_isolation_level` (String) Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. +- `consumer_isolation_level` (String) Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. 
read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. - `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`. - `consumer_max_poll_interval_ms` (Number) The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000). - `consumer_max_poll_records` (Number) The maximum number of records returned in a single call to poll() (defaults to 500). @@ -153,7 +153,7 @@ Optional: - `offset_flush_timeout_ms` (Number) Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000). - `producer_batch_size` (Number) This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384). - `producer_buffer_memory` (Number) The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432). -- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. +- `producer_compression_type` (String) Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. - `producer_linger_ms` (Number) This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0. - `producer_max_request_size` (Number) This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`. - `scheduled_rebalance_max_delay_ms` (Number) The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes. diff --git a/docs/resources/mysql.md b/docs/resources/mysql.md index d920ea288..f81376e2c 100644 --- a/docs/resources/mysql.md +++ b/docs/resources/mysql.md @@ -185,8 +185,8 @@ Optional: - `innodb_thread_concurrency` (Number) Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit). Example: `10`. - `innodb_write_io_threads` (Number) The number of I/O threads for write operations in InnoDB. Default is 4. 
Changing this parameter will lead to a restart of the MySQL service. Example: `10`. - `interactive_timeout` (Number) The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`. -- `internal_tmp_mem_storage_engine` (String) Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables. -- `log_output` (String) Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE. +- `internal_tmp_mem_storage_engine` (String) Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables. +- `log_output` (String) Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE. - `long_query_time` (Number) The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`. - `max_allowed_packet` (Number) Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M). Example: `67108864`. - `max_heap_table_size` (Number) Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M). Example: `16777216`. diff --git a/docs/resources/opensearch.md b/docs/resources/opensearch.md index 8a1da4414..9f8b6f50b 100644 --- a/docs/resources/opensearch.md +++ b/docs/resources/opensearch.md @@ -142,6 +142,7 @@ Optional: - `chunk_size` (String) Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository. - `compress` (Boolean) When set to true metadata files are stored in compressed format. - `endpoint_suffix` (String) Defines the DNS suffix for Azure Storage endpoints. +- `include_aliases` (Boolean) Whether to restore aliases alongside their associated indexes. Default is true. - `indices` (String) A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`. - `key` (String, Sensitive) Azure account secret key. One of key or sas_token should be specified. - `restore_global_state` (Boolean) If true, restore the cluster state. Defaults to false. @@ -162,6 +163,7 @@ Optional: - `chunk_size` (String) Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository. - `compress` (Boolean) When set to true metadata files are stored in compressed format. +- `include_aliases` (Boolean) Whether to restore aliases alongside their associated indexes. Default is true. - `indices` (String) A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. 
If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`. - `restore_global_state` (Boolean) If true, restore the cluster state. Defaults to false. @@ -328,7 +330,7 @@ Optional: Optional: -- `mode` (String) Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only. +- `mode` (String) Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only. - `node_duress` (Block List, Max: 1) Node duress settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--search_backpressure--node_duress)) - `search_shard_task` (Block List, Max: 1) Search shard settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--search_backpressure--search_shard_task)) - `search_task` (Block List, Max: 1) Search task settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--search_backpressure--search_task)) @@ -498,6 +500,7 @@ Optional: - `chunk_size` (String) Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository. - `compress` (Boolean) When set to true metadata files are stored in compressed format. - `endpoint` (String) The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint. +- `include_aliases` (Boolean) Whether to restore aliases alongside their associated indexes. Default is true. - `indices` (String) A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`. - `restore_global_state` (Boolean) If true, restore the cluster state. Defaults to false. - `server_side_encryption` (Boolean) When set to true files are encrypted on server side. diff --git a/docs/resources/organization_group_project.md b/docs/resources/organization_group_project.md index fb372a6e0..aecb0661c 100644 --- a/docs/resources/organization_group_project.md +++ b/docs/resources/organization_group_project.md @@ -51,7 +51,7 @@ resource "aiven_organization_group_project" "example" { - `group_id` (String) The ID of the user group. - `project` (String) The project that the users in the group are members of. -- `role` (String) [Project-level role](https://aiven.io/docs/platform/reference/project-member-privileges) assigned to all users in the group. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. +- `role` (String) [Project-level role](https://aiven.io/docs/platform/reference/project-member-privileges) assigned to all users in the group. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `services:maintenance`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. 
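For reference, a minimal sketch of assigning one of the newly documented project-level roles through this resource; the group and project references are hypothetical placeholders, not part of this change:

```terraform
# Sketch only: grants all users in a hypothetical group one of the
# roles added in this change. The referenced resources are assumed
# to exist elsewhere in the configuration.
resource "aiven_organization_group_project" "example" {
  group_id = aiven_organization_user_group.example.group_id
  project  = aiven_project.example.project
  role     = "project:integrations:write" # one of the values listed above
}
```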
### Optional diff --git a/docs/resources/organization_permission.md b/docs/resources/organization_permission.md index 455d5f4af..2faeefb88 100644 --- a/docs/resources/organization_permission.md +++ b/docs/resources/organization_permission.md @@ -65,7 +65,7 @@ resource "aiven_organization_permission" "developers" { Required: -- `permissions` (Set of String) List of permissions. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `project:services:read`, `read_only` and `service:logs:read`. +- `permissions` (Set of String) List of permissions. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `read_only`, `service:configuration:write`, `service:logs:read` and `services:maintenance`. - `principal_id` (String) ID of the user or group. - `principal_type` (String) The type of principal. The possible values are `user` and `user_group`. diff --git a/docs/resources/pg.md b/docs/resources/pg.md index c34e410e1..0ef16702a 100644 --- a/docs/resources/pg.md +++ b/docs/resources/pg.md @@ -161,7 +161,7 @@ Optional: - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`. - `shared_buffers_percentage` (Number) Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. Example: `41.5`. - `static_ips` (Boolean) Use static public IP addresses. -- `synchronous_replication` (String) Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication. +- `synchronous_replication` (String) Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication. - `timescaledb` (Block List, Max: 1) System-wide settings for the timescaledb extension (see [below for nested schema](#nestedblock--pg_user_config--timescaledb)) - `variant` (String) Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default. - `work_mem` (Number) Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). Example: `4`. @@ -220,8 +220,8 @@ Optional: - `idle_in_transaction_session_timeout` (Number) Time out sessions with open transactions after this number of milliseconds. - `jit` (Boolean) Controls system-wide use of Just-in-Time Compilation (JIT). - `log_autovacuum_min_duration` (Number) Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions. -- `log_error_verbosity` (String) Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged. 
-- `log_line_prefix` (String) Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats. +- `log_error_verbosity` (String) Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged. +- `log_line_prefix` (String) Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats. - `log_min_duration_statement` (Number) Log statements that take more than this number of milliseconds to run, -1 disables. - `log_temp_files` (Number) Log statements for each temporary file created larger than this number of kilobytes, -1 disables. - `max_files_per_process` (Number) PostgreSQL maximum number of files that can be open per process. @@ -242,12 +242,12 @@ Optional: - `pg_partman_bgw__dot__role` (String) Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`. - `pg_stat_monitor__dot__pgsm_enable_query_plan` (Boolean) Enables or disables query plan monitoring. - `pg_stat_monitor__dot__pgsm_max_buckets` (Number) Sets the maximum number of buckets. Example: `10`. -- `pg_stat_statements__dot__track` (String) Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top. +- `pg_stat_statements__dot__track` (String) Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top. - `temp_file_limit` (Number) PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`. - `timezone` (String) PostgreSQL service timezone. Example: `Europe/Helsinki`. - `track_activity_query_size` (Number) Specifies the number of bytes reserved to track the currently executing command for each active session. Example: `1024`. - `track_commit_timestamp` (String) Enum: `off`, `on`. Record commit time of transactions. -- `track_functions` (String) Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used. +- `track_functions` (String) Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used. - `track_io_timing` (String) Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms. - `wal_sender_timeout` (Number) Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout. Example: `60000`. - `wal_writer_delay` (Number) WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance. Example: `50`. 
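The enum reorderings in this hunk are cosmetic (values sorted alphabetically), not behavioral. As a hedged illustration of where these settings live, a minimal `aiven_pg` sketch setting two of the enums documented above; the project, plan, and service names are hypothetical:

```terraform
resource "aiven_pg" "example" {
  project      = "example-project"
  plan         = "startup-4"
  service_name = "example-pg"

  pg_user_config {
    pg {
      # Values must come from the documented enums above;
      # ordering in the docs carries no meaning.
      log_error_verbosity = "VERBOSE"
      track_functions     = "pl"
    }
  }
}
```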
@@ -293,7 +293,7 @@ Optional: - `autodb_idle_timeout` (Number) If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). Default: `3600`. - `autodb_max_db_connections` (Number) Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. Example: `0`. -- `autodb_pool_mode` (String) Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`. +- `autodb_pool_mode` (String) Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`. - `autodb_pool_size` (Number) If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`. - `ignore_startup_parameters` (List of String) List of parameters to ignore when given in startup packet. - `max_prepared_statements` (Number) PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`. diff --git a/docs/resources/project_user.md b/docs/resources/project_user.md index 384ab018a..b24c5f67d 100644 --- a/docs/resources/project_user.md +++ b/docs/resources/project_user.md @@ -33,7 +33,7 @@ resource "aiven_project_user" "mytestuser" { ### Required - `email` (String) Email address of the user in lowercase. Changing this property forces recreation of the resource. -- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `project:services:read`, `read_only` and `service:logs:read`. +- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `read_only`, `service:configuration:write`, `service:logs:read` and `services:maintenance`. - `project` (String) The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource. ### Optional diff --git a/docs/resources/redis.md b/docs/resources/redis.md index c2fa5105e..9da99480d 100644 --- a/docs/resources/redis.md +++ b/docs/resources/redis.md @@ -108,7 +108,7 @@ Optional: - `redis_io_threads` (Number) Set Redis IO thread count. Changing this will cause a restart of the Redis service. Example: `1`. - `redis_lfu_decay_time` (Number) LFU maxmemory-policy counter decay time in minutes. Default: `1`. - `redis_lfu_log_factor` (Number) Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`. -- `redis_maxmemory_policy` (String) Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`. +- `redis_maxmemory_policy` (String) Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`. 
- `redis_notify_keyspace_events` (String) Set notify-keyspace-events option. - `redis_number_of_databases` (Number) Set number of Redis databases. Changing this will cause a restart of the Redis service. Example: `16`. - `redis_persistence` (String) Enum: `off`, `rdb`. When persistence is `rdb`, Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked. diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md index 580aa2dd1..c9ac9e0d0 100644 --- a/docs/resources/service_integration.md +++ b/docs/resources/service_integration.md @@ -101,14 +101,14 @@ Optional: Required: - `columns` (Block List, Min: 1, Max: 100) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) -- `data_format` (String) Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`. +- `data_format` (String) Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`. - `group_name` (String) Kafka consumers group. Default: `clickhouse`. - `name` (String) Name of the table. Example: `events`. - `topics` (Block List, Min: 1, Max: 100) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) Optional: -- `auto_offset_reset` (String) Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`. +- `auto_offset_reset` (String) Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`. - `date_time_input_format` (String) Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`. - `handle_error_mode` (String) Enum: `default`, `stream`. How to handle errors for Kafka engine. Default: `default`. - `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. Default: `0`. @@ -313,7 +313,7 @@ Optional: - `consumer_max_poll_records` (Number) Set consumer max.poll.records. The default is 500. Example: `500`. - `producer_batch_size` (Number) The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`. - `producer_buffer_memory` (Number) The amount of bytes producer can use for buffering data before publishing to broker. Example: `8388608`. -- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. +- `producer_compression_type` (String) Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. 
Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. - `producer_linger_ms` (Number) The linger time (ms) for waiting new data to arrive for publishing. Example: `100`. - `producer_max_request_size` (Number) The maximum request size in bytes. Example: `1048576`. diff --git a/docs/resources/service_integration_endpoint.md b/docs/resources/service_integration_endpoint.md index beb2f2747..9abb89273 100644 --- a/docs/resources/service_integration_endpoint.md +++ b/docs/resources/service_integration_endpoint.md @@ -108,7 +108,7 @@ Optional: - `kafka_consumer_check_instances` (Number) Number of separate instances to fetch kafka consumer statistics with. Example: `8`. - `kafka_consumer_stats_timeout` (Number) Number of seconds that datadog will wait to get consumer statistics from brokers. Example: `60`. - `max_partition_contexts` (Number) Maximum number of partition contexts to send. Example: `32000`. -- `site` (String) Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com. +- `site` (String) Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com. ### Nested Schema for `datadog_user_config.datadog_tags` @@ -212,7 +212,7 @@ Required: Required: - `bootstrap_servers` (String) Bootstrap servers. Example: `10.0.0.1:9092,10.0.0.2:9092`. -- `security_protocol` (String) Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol. +- `security_protocol` (String) Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol. Optional: @@ -291,7 +291,7 @@ Optional: - `ssl_client_key` (String, Sensitive) Client key. Example: `-----BEGIN PRIVATE KEY----- ... -----END PRIVATE KEY-----`. -- `ssl_mode` (String) Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`. +- `ssl_mode` (String) Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`. - `ssl_root_cert` (String) SSL Root Cert. Example: `-----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- @@ -313,7 +313,7 @@ Optional: Required: -- `authentication` (String) Enum: `none`, `basic`. Authentication method. +- `authentication` (String) Enum: `basic`, `none`. Authentication method. - `url` (String) Schema Registry URL. Example: `https://schema-registry.kafka.company.com:28419`. Optional: @@ -345,7 +345,7 @@ Optional: Required: -- `format` (String) Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`. +- `format` (String) Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`. - `port` (Number) Rsyslog server port. Default: `514`. - `server` (String) Rsyslog server IP address or hostname. Example: `logs.example.com`. - `tls` (Boolean) Require TLS. Default: `true`. 
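A hedged usage sketch for the rsyslog endpoint settings documented above, with the required fields (`server`, `port`, `format`, `tls`) filled in; the project name and server address are hypothetical:

```terraform
resource "aiven_service_integration_endpoint" "rsyslog" {
  project       = "example-project"
  endpoint_name = "example-rsyslog"
  endpoint_type = "rsyslog"

  rsyslog_user_config {
    server = "logs.example.com"
    port   = 514
    format = "rfc5424" # enum: custom, rfc3164, rfc5424
    tls    = true
  }
}
```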
diff --git a/docs/resources/valkey.md b/docs/resources/valkey.md index b25c06088..00623f1d9 100644 --- a/docs/resources/valkey.md +++ b/docs/resources/valkey.md @@ -143,7 +143,7 @@ Optional: - `valkey_io_threads` (Number) Set Valkey IO thread count. Changing this will cause a restart of the Valkey service. Example: `1`. - `valkey_lfu_decay_time` (Number) LFU maxmemory-policy counter decay time in minutes. Default: `1`. - `valkey_lfu_log_factor` (Number) Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`. -- `valkey_maxmemory_policy` (String) Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`. +- `valkey_maxmemory_policy` (String) Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`. - `valkey_notify_keyspace_events` (String) Set notify-keyspace-events option. - `valkey_number_of_databases` (Number) Set number of Valkey databases. Changing this will cause a restart of the Valkey service. Example: `16`. - `valkey_persistence` (String) Enum: `off`, `rdb`. When persistence is `rdb`, Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is `off`, no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked. diff --git a/internal/sdkprovider/userconfig/service/cassandra.go b/internal/sdkprovider/userconfig/service/cassandra.go index 235f50f61..1dfd50851 100644 --- a/internal/sdkprovider/userconfig/service/cassandra.go +++ b/internal/sdkprovider/userconfig/service/cassandra.go @@ -14,7 +14,6 @@ func cassandraUserConfig() *schema.Schema { DiffSuppressFunc: diff.SuppressUnchanged, Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "additional_backup_regions": { - Deprecated: "This property is deprecated.", Description: "Additional Cloud Regions for Backup Replication.", Elem: &schema.Schema{ Description: "Target cloud. Example: `aws-eu-central-1`.", diff --git a/internal/sdkprovider/userconfig/service/dragonfly.go b/internal/sdkprovider/userconfig/service/dragonfly.go index 6a6818846..fdac1e106 100644 --- a/internal/sdkprovider/userconfig/service/dragonfly.go +++ b/internal/sdkprovider/userconfig/service/dragonfly.go @@ -20,10 +20,10 @@ func dragonflyUserConfig() *schema.Schema { Type: schema.TypeBool, }, "dragonfly_persistence": { - Description: "Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.", + Description: "Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. 
Also, the service can't be forked.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"off", "rdb", "dfs"}, false), + ValidateFunc: validation.StringInSlice([]string{"dfs", "off", "rdb"}, false), }, "dragonfly_ssl": { Description: "Require SSL to access Dragonfly. Default: `true`.", diff --git a/internal/sdkprovider/userconfig/service/grafana.go b/internal/sdkprovider/userconfig/service/grafana.go index 6a25cfbd1..9fa20c7d9 100644 --- a/internal/sdkprovider/userconfig/service/grafana.go +++ b/internal/sdkprovider/userconfig/service/grafana.go @@ -41,10 +41,10 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "alerting_nodata_or_nullvalues": { - Description: "Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.", + Description: "Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"alerting", "no_data", "keep_state", "ok"}, false), + ValidateFunc: validation.StringInSlice([]string{"alerting", "keep_state", "no_data", "ok"}, false), }, "allow_embedding": { Description: "Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.", @@ -332,10 +332,10 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeList, }, "cookie_samesite": { - Description: "Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.", + Description: "Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"lax", "strict", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"lax", "none", "strict"}, false), }, "custom_domain": { Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.", @@ -599,10 +599,10 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "starttls_policy": { - Description: "Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.", + Description: "Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"OpportunisticStartTLS", "MandatoryStartTLS", "NoStartTLS"}, false), + ValidateFunc: validation.StringInSlice([]string{"MandatoryStartTLS", "NoStartTLS", "OpportunisticStartTLS"}, false), }, "username": { Description: "Username for SMTP authentication. Example: `smtpuser`.", @@ -630,10 +630,10 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "user_auto_assign_org_role": { - Description: "Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.", + Description: "Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. 
diff --git a/internal/sdkprovider/userconfig/service/kafka.go b/internal/sdkprovider/userconfig/service/kafka.go index 15361f3ba..104701bbf 100644 --- a/internal/sdkprovider/userconfig/service/kafka.go +++ b/internal/sdkprovider/userconfig/service/kafka.go @@ -94,10 +94,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).", + Description: "Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "uncompressed", "producer"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "producer", "snappy", "uncompressed", "zstd"}, false), }, "connections_max_idle_ms": { Description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.", @@ -145,10 +145,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "log_cleanup_policy": { - Description: "Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).", + Description: "Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"delete", "compact", "compact,delete"}, false), + ValidateFunc: validation.StringInSlice([]string{"compact", "compact,delete", "delete"}, false), }, "log_flush_interval_messages": { Description: "The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.", @@ -353,10 +353,10 @@ func kafkaUserConfig() *schema.Schema { Description: "Kafka Connect configuration values", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "connector_client_config_override_policy": { - Description: "Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.", + Description: "Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), + ValidateFunc: validation.StringInSlice([]string{"All", "None"}, false), }, "consumer_auto_offset_reset": { Description: "Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", @@ -370,10 +370,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "consumer_isolation_level": { - Description: "Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Description: "Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), + ValidateFunc: validation.StringInSlice([]string{"read_committed", "read_uncommitted"}, false), }, "consumer_max_partition_fetch_bytes": { Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.", @@ -411,10 +411,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "producer_compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "none", "snappy", "zstd"}, false), }, "producer_linger_ms": { Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.", @@ -548,10 +548,10 @@ func kafkaUserConfig() *schema.Schema { ValidateFunc: validation.IntInSlice([]int{1000, 15000, 30000}), }, "name_strategy": { - Description: "Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.", + Description: "Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"topic_name", "record_name", "topic_record_name"}, false), + ValidateFunc: validation.StringInSlice([]string{"record_name", "topic_name", "topic_record_name"}, false), }, "name_strategy_validation": { Description: "If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.", @@ -559,16 +559,16 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "producer_acks": { - Description: "Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.", + Description: "Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"all", "-1", "0", "1"}, false), + ValidateFunc: validation.StringInSlice([]string{"-1", "0", "1", "all"}, false), }, "producer_compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "none", "snappy", "zstd"}, false), }, "producer_linger_ms": { Description: "Wait for up to the given delay to allow batching records together. Default: `0`.",
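A hedged usage sketch for two of the reordered Kafka enums, assuming an `aiven_kafka` resource and the nested `kafka` block the schema above implies:

```hcl
resource "aiven_kafka" "example" {
  project      = "my-project"
  cloud_name   = "google-europe-west1"
  plan         = "business-4"
  service_name = "example-kafka"

  kafka_user_config {
    kafka {
      compression_type   = "zstd"           # gzip, lz4, producer, snappy, uncompressed or zstd
      log_cleanup_policy = "compact,delete" # compact, compact,delete or delete
    }
  }
}
```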
diff --git a/internal/sdkprovider/userconfig/service/kafka_connect.go b/internal/sdkprovider/userconfig/service/kafka_connect.go index dc44309c9..8fb831730 100644 --- a/internal/sdkprovider/userconfig/service/kafka_connect.go +++ b/internal/sdkprovider/userconfig/service/kafka_connect.go @@ -68,10 +68,10 @@ func kafkaConnectUserConfig() *schema.Schema { Description: "Kafka Connect configuration values", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "connector_client_config_override_policy": { - Description: "Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.", + Description: "Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), + ValidateFunc: validation.StringInSlice([]string{"All", "None"}, false), }, "consumer_auto_offset_reset": { Description: "Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", @@ -85,10 +85,10 @@ func kafkaConnectUserConfig() *schema.Schema { Type: schema.TypeInt, }, "consumer_isolation_level": { - Description: "Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Description: "Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), + ValidateFunc: validation.StringInSlice([]string{"read_committed", "read_uncommitted"}, false), }, "consumer_max_partition_fetch_bytes": { Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.", @@ -126,10 +126,10 @@ func kafkaConnectUserConfig() *schema.Schema { Type: schema.TypeInt, }, "producer_compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "none", "snappy", "zstd"}, false), }, "producer_linger_ms": { Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.",
diff --git a/internal/sdkprovider/userconfig/service/mysql.go b/internal/sdkprovider/userconfig/service/mysql.go index 3122379ed..7ecfb9452 100644 --- a/internal/sdkprovider/userconfig/service/mysql.go +++ b/internal/sdkprovider/userconfig/service/mysql.go @@ -235,16 +235,16 @@ func mysqlUserConfig() *schema.Schema { Type: schema.TypeInt, }, "internal_tmp_mem_storage_engine": { - Description: "Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.", + Description: "Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"TempTable", "MEMORY"}, false), + ValidateFunc: validation.StringInSlice([]string{"MEMORY", "TempTable"}, false), }, "log_output": { - Description: "Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.", + Description: "Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"INSIGHTS", "NONE", "TABLE", "INSIGHTS,TABLE"}, false), + ValidateFunc: validation.StringInSlice([]string{"INSIGHTS", "INSIGHTS,TABLE", "NONE", "TABLE"}, false), }, "long_query_time": { Description: "The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.",
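A sketch for the MySQL `log_output` enum, assuming an `aiven_mysql` resource; `slow_query_log` is an assumed companion field that the description above references:

```hcl
resource "aiven_mysql" "example" {
  project      = "my-project"
  cloud_name   = "google-europe-west1"
  plan         = "business-4"
  service_name = "example-mysql"

  mysql_user_config {
    mysql {
      slow_query_log  = true             # assumed field; enables the slow log
      long_query_time = 10               # seconds, per the schema above
      log_output      = "INSIGHTS,TABLE" # INSIGHTS, INSIGHTS,TABLE, NONE or TABLE
    }
  }
}
```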
diff --git a/internal/sdkprovider/userconfig/service/opensearch.go b/internal/sdkprovider/userconfig/service/opensearch.go index 2980431a8..cd28c10d7 100644 --- a/internal/sdkprovider/userconfig/service/opensearch.go +++ b/internal/sdkprovider/userconfig/service/opensearch.go @@ -57,6 +57,11 @@ func opensearchUserConfig() *schema.Schema { Optional: true, Type: schema.TypeString, }, + "include_aliases": { + Description: "Whether to restore aliases alongside their associated indexes. Default is true.", + Optional: true, + Type: schema.TypeBool, + }, "indices": { Description: "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.", Optional: true, @@ -128,6 +133,11 @@ func opensearchUserConfig() *schema.Schema { Sensitive: true, Type: schema.TypeString, }, + "include_aliases": { + Description: "Whether to restore aliases alongside their associated indexes. Default is true.", + Optional: true, + Type: schema.TypeBool, + }, "indices": { Description: "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.", Optional: true, @@ -605,10 +615,10 @@ func opensearchUserConfig() *schema.Schema { Description: "Search Backpressure Settings", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "mode": { - Description: "Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.", + Description: "Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"monitor_only", "enforced", "disabled"}, false), + ValidateFunc: validation.StringInSlice([]string{"disabled", "enforced", "monitor_only"}, false), }, "node_duress": { Description: "Node duress settings", @@ -1019,6 +1029,11 @@ func opensearchUserConfig() *schema.Schema { Optional: true, Type: schema.TypeString, }, + "include_aliases": { + Description: "Whether to restore aliases alongside their associated indexes. Default is true.", + Optional: true, + Type: schema.TypeBool, + }, "indices": { Description: "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.", Optional: true,
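`include_aliases` is the one genuinely new field in this diff, added to each of the three snapshot-migration blocks. A sketch of opting out of alias restoration; the `s3_migration` block name is an assumption, and the credential fields the block requires are omitted:

```hcl
resource "aiven_opensearch" "example" {
  project      = "my-project"
  cloud_name   = "google-europe-west1"
  plan         = "startup-4"
  service_name = "example-opensearch"

  opensearch_user_config {
    s3_migration {
      # snapshot_name, indices and include_aliases come from the schema above;
      # include_aliases defaults to true.
      snapshot_name   = "snapshot-1"
      indices         = "metrics*,logs*,data-20240823"
      include_aliases = false # restore the indexes without their aliases
    }
  }
}
```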
diff --git a/internal/sdkprovider/userconfig/service/pg.go b/internal/sdkprovider/userconfig/service/pg.go index a1fc426b2..2582738ee 100644 --- a/internal/sdkprovider/userconfig/service/pg.go +++ b/internal/sdkprovider/userconfig/service/pg.go @@ -242,16 +242,16 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "log_error_verbosity": { - Description: "Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.", + Description: "Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"TERSE", "DEFAULT", "VERBOSE"}, false), + ValidateFunc: validation.StringInSlice([]string{"DEFAULT", "TERSE", "VERBOSE"}, false), }, "log_line_prefix": { - Description: "Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.", + Description: "Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"'pid=%p,user=%u,db=%d,app=%a,client=%h '", "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", "'%m [%p] %q[user=%u,db=%d,app=%a] '", "'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '"}, false), + ValidateFunc: validation.StringInSlice([]string{"'%m [%p] %q[user=%u,db=%d,app=%a] '", "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", "'pid=%p,user=%u,db=%d,app=%a,client=%h '", "'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '"}, false), }, "log_min_duration_statement": { Description: "Log statements that take more than this number of milliseconds to run, -1 disables.", @@ -354,10 +354,10 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "pg_stat_statements__dot__track": { - Description: "Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", + Description: "Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"all", "top", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"all", "none", "top"}, false), }, "temp_file_limit": { Description: "PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`.", @@ -381,10 +381,10 @@ func pgUserConfig() *schema.Schema { ValidateFunc: validation.StringInSlice([]string{"off", "on"}, false), }, "track_functions": { - Description: "Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.", + Description: "Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"all", "pl", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"all", "none", "pl"}, false), }, "track_io_timing": { Description: "Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", @@ -580,10 +580,10 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "autodb_pool_mode": { - Description: "Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.", + Description: "Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"session", "transaction", "statement"}, false), + ValidateFunc: validation.StringInSlice([]string{"session", "statement", "transaction"}, false), }, "autodb_pool_size": { Description: "If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.", @@ -745,10 +745,10 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeBool, }, "synchronous_replication": { - Description: "Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.", + Description: "Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"quorum", "off"}, false), + ValidateFunc: validation.StringInSlice([]string{"off", "quorum"}, false), }, "timescaledb": { Description: "System-wide settings for the timescaledb extension",
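A sketch combining two of the reordered PostgreSQL enums, assuming an `aiven_pg` resource with `pg` and `pgbouncer` blocks nested in `pg_user_config` as the schema above suggests:

```hcl
resource "aiven_pg" "example" {
  project      = "my-project"
  cloud_name   = "google-europe-west1"
  plan         = "startup-4"
  service_name = "example-pg"

  pg_user_config {
    pg {
      pg_stat_statements__dot__track = "all" # all, none or top; maps to pg_stat_statements.track
      track_functions                = "pl"  # all, none or pl
    }
    pgbouncer {
      autodb_pool_mode = "transaction" # session, statement or transaction
    }
  }
}
```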
diff --git a/internal/sdkprovider/userconfig/service/redis.go b/internal/sdkprovider/userconfig/service/redis.go index 5f9fc77d8..fdf123724 100644 --- a/internal/sdkprovider/userconfig/service/redis.go +++ b/internal/sdkprovider/userconfig/service/redis.go @@ -215,10 +215,10 @@ func redisUserConfig() *schema.Schema { Type: schema.TypeInt, }, "redis_maxmemory_policy": { - Description: "Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.", + Description: "Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"noeviction", "allkeys-lru", "volatile-lru", "allkeys-random", "volatile-random", "volatile-ttl", "volatile-lfu", "allkeys-lfu"}, false), + ValidateFunc: validation.StringInSlice([]string{"allkeys-lfu", "allkeys-lru", "allkeys-random", "noeviction", "volatile-lfu", "volatile-lru", "volatile-random", "volatile-ttl"}, false), }, "redis_notify_keyspace_events": { Description: "Set notify-keyspace-events option.",
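The same pattern for Redis, assuming an `aiven_redis` resource; any of the eight listed policies is valid:

```hcl
resource "aiven_redis" "example" {
  project      = "my-project"
  cloud_name   = "google-europe-west1"
  plan         = "startup-4"
  service_name = "example-redis"

  redis_user_config {
    redis_maxmemory_policy = "allkeys-lru" # default is noeviction
  }
}
```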
diff --git a/internal/sdkprovider/userconfig/service/valkey.go b/internal/sdkprovider/userconfig/service/valkey.go index 69c2d2ce3..b27e8d115 100644 --- a/internal/sdkprovider/userconfig/service/valkey.go +++ b/internal/sdkprovider/userconfig/service/valkey.go @@ -231,10 +231,10 @@ func valkeyUserConfig() *schema.Schema { Type: schema.TypeInt, }, "valkey_maxmemory_policy": { - Description: "Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.", + Description: "Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"noeviction", "allkeys-lru", "volatile-lru", "allkeys-random", "volatile-random", "volatile-ttl", "volatile-lfu", "allkeys-lfu"}, false), + ValidateFunc: validation.StringInSlice([]string{"allkeys-lfu", "allkeys-lru", "allkeys-random", "noeviction", "volatile-lfu", "volatile-lru", "volatile-random", "volatile-ttl"}, false), }, "valkey_notify_keyspace_events": { Description: "Set notify-keyspace-events option.",
diff --git a/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go b/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go index 33508de24..dc16ca875 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go +++ b/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go @@ -17,10 +17,10 @@ func clickhouseKafkaUserConfig() *schema.Schema { Description: "Tables to create", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "auto_offset_reset": { - Description: "Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.", + Description: "Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"smallest", "earliest", "beginning", "largest", "latest", "end"}, false), + ValidateFunc: validation.StringInSlice([]string{"beginning", "earliest", "end", "largest", "latest", "smallest"}, false), }, "columns": { Description: "Table columns", @@ -41,10 +41,10 @@ func clickhouseKafkaUserConfig() *schema.Schema { Type: schema.TypeList, }, "data_format": { - Description: "Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.", + Description: "Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.", Required: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"Avro", "CSV", "JSONAsString", "JSONCompactEachRow", "JSONCompactStringsEachRow", "JSONEachRow", "JSONStringsEachRow", "MsgPack", "TSKV", "TSV", "TabSeparated", "RawBLOB", "AvroConfluent", "Parquet"}, false), + ValidateFunc: validation.StringInSlice([]string{"Avro", "AvroConfluent", "CSV", "JSONAsString", "JSONCompactEachRow", "JSONCompactStringsEachRow", "JSONEachRow", "JSONStringsEachRow", "MsgPack", "Parquet", "RawBLOB", "TSKV", "TSV", "TabSeparated"}, false), }, "date_time_input_format": { Description: "Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`.",
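A hedged sketch of the `clickhouse_kafka` integration's `tables` block; the integration resource shape, the `topics` sub-block, and all names are assumptions, while `data_format` (required) and `auto_offset_reset` come from the schema above:

```hcl
resource "aiven_service_integration" "clickhouse_kafka" {
  project                  = "my-project"
  integration_type         = "clickhouse_kafka"
  source_service_name      = aiven_kafka.example.service_name
  destination_service_name = aiven_clickhouse.example.service_name # assumed resource

  clickhouse_kafka_user_config {
    tables {
      name              = "kafka_events"          # assumed table name
      group_name        = "clickhouse-ingestion"  # assumed consumer group
      data_format       = "JSONEachRow"           # required; fourteen formats accepted
      auto_offset_reset = "earliest"              # default per the schema above

      columns {
        name = "payload"
        type = "String"
      }
      topics {
        name = "events"
      }
    }
  }
}
```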
diff --git a/internal/sdkprovider/userconfig/serviceintegration/datadog.go b/internal/sdkprovider/userconfig/serviceintegration/datadog.go index ffee5b531..06b3e0307 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/datadog.go +++ b/internal/sdkprovider/userconfig/serviceintegration/datadog.go @@ -85,8 +85,9 @@ func datadogUserConfig() *schema.Schema { "kafka_custom_metrics": { Description: "List of custom metrics.", Elem: &schema.Schema{ - Description: "Metric name. Example: `kafka.log.log_size`.", - Type: schema.TypeString, + Description: "Enum: `kafka.log.log_end_offset`, `kafka.log.log_size`, `kafka.log.log_start_offset`. Metric name.", + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"kafka.log.log_end_offset", "kafka.log.log_size", "kafka.log.log_start_offset"}, false), }, MaxItems: 1024, Optional: true,
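Since `kafka_custom_metrics` entries are now validated against an enum, a sketch of a conforming configuration; the wiring to a Datadog endpoint is assumed:

```hcl
resource "aiven_service_integration" "datadog" {
  project                 = "my-project"
  integration_type        = "datadog"
  source_service_name     = aiven_kafka.example.service_name
  destination_endpoint_id = aiven_service_integration_endpoint.datadog.id # assumed endpoint

  datadog_user_config {
    # Only the three values listed above now pass validation.
    kafka_custom_metrics = ["kafka.log.log_size", "kafka.log.log_end_offset"]
  }
}
```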
diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go index d81f8d8a5..5919de83a 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go @@ -16,9 +16,9 @@ func externalAwsCloudwatchLogsUserConfig() *schema.Schema { Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.", Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), + ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false), }, MaxItems: 5, Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go index 61f0fa4c3..04609ac84 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go @@ -16,9 +16,9 @@ func externalElasticsearchLogsUserConfig() *schema.Schema { Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.", Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), + ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false), }, MaxItems: 5, Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go index 5ff9a4d65..5b67c1bd8 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go @@ -16,9 +16,9 @@ func externalOpensearchLogsUserConfig() *schema.Schema { Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.", Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), + ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false), }, MaxItems: 5, Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go b/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go index 9dc5c5d25..7ed77d1b4 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go @@ -22,9 +22,9 @@ func kafkaLogsUserConfig() *schema.Schema { "selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.", Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), + ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false), }, MaxItems: 5, Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go b/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go index ca941c1dd..891b5b4c6 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go +++ b/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go @@ -49,10 +49,10 @@ func kafkaMirrormakerUserConfig() *schema.Schema { Type: schema.TypeInt, }, "producer_compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "none", "snappy", "zstd"}, false), }, "producer_linger_ms": { Description: "The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.",
diff --git a/internal/sdkprovider/userconfig/serviceintegration/logs.go b/internal/sdkprovider/userconfig/serviceintegration/logs.go index 7e7b56462..e1f1fdea2 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/logs.go @@ -27,9 +27,9 @@ func logsUserConfig() *schema.Schema { "selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.", Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), + ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false), }, MaxItems: 5, Optional: true,
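One usage sketch covers the identical `selected_log_fields` change repeated across the five log integrations above; the `logs` integration shape and the service references are assumptions:

```hcl
resource "aiven_service_integration" "logs" {
  project                  = "my-project"
  integration_type         = "logs"
  source_service_name      = aiven_kafka.example.service_name
  destination_service_name = aiven_opensearch.example.service_name

  logs_user_config {
    # Up to five fields; MESSAGE and the timestamp are always sent.
    selected_log_fields = ["HOSTNAME", "PRIORITY", "SYSTEMD_UNIT"]
  }
}
```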
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go index da87deff1..f03a7d94b 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go @@ -59,10 +59,10 @@ func datadogUserConfig() *schema.Schema { Type: schema.TypeInt, }, "site": { - Description: "Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.", + Description: "Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"datadoghq.com", "datadoghq.eu", "us3.datadoghq.com", "us5.datadoghq.com", "ddog-gov.com", "ap1.datadoghq.com"}, false), + ValidateFunc: validation.StringInSlice([]string{"ap1.datadoghq.com", "datadoghq.com", "datadoghq.eu", "ddog-gov.com", "us3.datadoghq.com", "us5.datadoghq.com"}, false), }, }}, MaxItems: 1,
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go index 45c7a6b57..0399cdd7f 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go @@ -37,10 +37,10 @@ func externalKafkaUserConfig() *schema.Schema { Type: schema.TypeString, }, "security_protocol": { - Description: "Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.", + Description: "Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.", Required: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"}, false), + ValidateFunc: validation.StringInSlice([]string{"PLAINTEXT", "SASL_PLAINTEXT", "SASL_SSL", "SSL"}, false), }, "ssl_ca_cert": { Description: "PEM-encoded CA certificate. Example: `-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n`.",
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go index cfb54f7fc..223e6c4ee 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go @@ -47,10 +47,10 @@ func externalPostgresqlUserConfig() *schema.Schema { Type: schema.TypeString, }, "ssl_mode": { - Description: "Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.", + Description: "Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}, false), + ValidateFunc: validation.StringInSlice([]string{"allow", "disable", "prefer", "require", "verify-ca", "verify-full"}, false), }, "ssl_root_cert": { Description: "SSL Root Cert. Example: `-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n`.",
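A sketch for the `external_postgresql` endpoint; the `_user_config` block name and the connection fields are assumptions, while `ssl_mode` and its `verify-full` default come from the schema above. Note the description says Aiven requires TLS, so the non-TLS values presumably fail at the API even though they pass client-side validation:

```hcl
resource "aiven_service_integration_endpoint" "external_pg" {
  project       = "my-project"
  endpoint_name = "external-pg"
  endpoint_type = "external_postgresql"

  external_postgresql_user_config {
    host     = "pg.example.com"   # assumed connection fields
    port     = 5432
    username = "replication_user"
    password = "placeholder"
    ssl_mode = "verify-full"      # allow, disable, prefer, require, verify-ca or verify-full
  }
}
```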
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go index 8c89d71b3..00d7e443f 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go @@ -15,10 +15,10 @@ func externalSchemaRegistryUserConfig() *schema.Schema { DiffSuppressFunc: diff.SuppressUnchanged, Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "authentication": { - Description: "Enum: `none`, `basic`. Authentication method.", + Description: "Enum: `basic`, `none`. Authentication method.", Required: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"none", "basic"}, false), + ValidateFunc: validation.StringInSlice([]string{"basic", "none"}, false), }, "basic_auth_password": { Description: "Basic authentication password. Example: `Zm9vYg==`.",
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go index 08b1929bd..485de2f7e 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go @@ -25,10 +25,10 @@ func rsyslogUserConfig() *schema.Schema { Type: schema.TypeString, }, "format": { - Description: "Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.", + Description: "Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.", Required: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"rfc5424", "rfc3164", "custom"}, false), + ValidateFunc: validation.StringInSlice([]string{"custom", "rfc3164", "rfc5424"}, false), }, "key": { Description: "PEM encoded client key. Example: `-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n`.",