From eecf48a5aff8588a2094ca8a06ae5915e4478a38 Mon Sep 17 00:00:00 2001
From: Murad Biashimov
Date: Wed, 8 Nov 2023 20:37:54 +0100
Subject: [PATCH] chore(userconfig): improve naming and docs (#1420)

---
 CHANGELOG.md                                  |   5 +
 docs/data-sources/cassandra.md                |   6 +-
 docs/data-sources/clickhouse.md               |   6 +-
 docs/data-sources/flink.md                    |   6 +-
 docs/data-sources/grafana.md                  |  24 +-
 docs/data-sources/influxdb.md                 |   6 +-
 docs/data-sources/kafka.md                    |   6 +-
 docs/data-sources/kafka_connect.md            |   6 +-
 docs/data-sources/kafka_mirrormaker.md        |   6 +-
 docs/data-sources/m3aggregator.md             |   6 +-
 docs/data-sources/m3db.md                     |  14 +-
 docs/data-sources/mysql.md                    |   6 +-
 docs/data-sources/opensearch.md               |   8 +-
 docs/data-sources/pg.md                       |   8 +-
 docs/data-sources/redis.md                    |   6 +-
 docs/resources/cassandra.md                   |  14 +-
 docs/resources/clickhouse.md                  |  14 +-
 docs/resources/flink.md                       |  10 +-
 docs/resources/grafana.md                     |  48 +-
 docs/resources/influxdb.md                    |  16 +-
 docs/resources/kafka.md                       |  34 +-
 docs/resources/kafka_connect.md               |  16 +-
 docs/resources/kafka_mirrormaker.md           |  10 +-
 docs/resources/m3aggregator.md                |  10 +-
 docs/resources/m3db.md                        |  50 +-
 docs/resources/mysql.md                       |  18 +-
 docs/resources/opensearch.md                  |  46 +-
 docs/resources/pg.md                          |  30 +-
 docs/resources/redis.md                       |  16 +-
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 internal/schemautil/mutations.go              |  26 -
 internal/schemautil/schemautil.go             |  25 -
 internal/schemautil/service.go                |  49 +-
 .../service/cassandra/cassandra.go            |   4 +-
 .../service/clickhouse/clickhouse.go          |   4 +-
 internal/sdkprovider/service/flink/flink.go   |   4 +-
 .../sdkprovider/service/grafana/grafana.go    |   4 +-
 .../service/grafana/grafana_test.go           |  62 ++
 .../sdkprovider/service/influxdb/influxdb.go  |   4 +-
 internal/sdkprovider/service/kafka/kafka.go   |   4 +-
 .../service/kafka/kafka_connect.go            |   4 +-
 .../service/kafka/kafka_mirrormaker.go        |   4 +-
 .../sdkprovider/service/kafka/kafka_test.go   |  10 +-
 .../sdkprovider/service/m3db/m3aggregator.go  |   4 +-
 internal/sdkprovider/service/m3db/m3db.go     |   4 +-
 internal/sdkprovider/service/mysql/mysql.go   |   4 +-
 .../service/opensearch/opensearch.go          |   4 +-
 internal/sdkprovider/service/pg/pg.go         |   9 +-
 internal/sdkprovider/service/redis/redis.go   |   4 +-
 .../userconfig/converters/converters.go       | 430 +++++++++++
 .../userconfig/converters/utils.go            | 102 +++
 .../userconfig/converters/utils_test.go       | 150 ++++
 internal/sdkprovider/userconfig/diff/diff.go  |  66 ++
 .../userconfig/service/cassandra.go           | 160 +++++
 .../userconfig/service/clickhouse.go          | 171 +++++
 .../sdkprovider/userconfig/service/flink.go   | 105 +++
 .../sdkprovider/userconfig/service/grafana.go | 622 ++++++++++++++++
 .../userconfig/service/influxdb.go            | 173 +++++
 .../sdkprovider/userconfig/service/kafka.go   | 669 +++++++++++++++++
 .../userconfig/service/kafka_connect.go       | 227 ++++++
 .../userconfig/service/kafka_mirrormaker.go   | 134 ++++
 .../userconfig/service/m3aggregator.go        |  83 +++
 .../sdkprovider/userconfig/service/m3db.go    | 366 ++++++++++
 .../sdkprovider/userconfig/service/mysql.go   | 407 +++++++++++
 .../userconfig/service/opensearch.go          | 674 ++++++++++++++++++
 internal/sdkprovider/userconfig/service/pg.go | 630 ++++++++++++++++
 .../sdkprovider/userconfig/service/redis.go   | 261 +++++++
 .../sdkprovider/userconfig/service/service.go |  40 ++
 main.go                                       |   2 +-
 ucgenerator/main.go                           | 613 +++++----------
 ucgenerator/models.go                         | 191 ++++-
 72 files changed, 6146 insertions(+), 820 deletions(-)
 create mode 100644 internal/sdkprovider/userconfig/converters/converters.go
 create mode 100644 internal/sdkprovider/userconfig/converters/utils.go
 create mode 100644 internal/sdkprovider/userconfig/converters/utils_test.go
 create mode 100644 internal/sdkprovider/userconfig/diff/diff.go
 create mode 100644 internal/sdkprovider/userconfig/service/cassandra.go
 create mode 100644 internal/sdkprovider/userconfig/service/clickhouse.go
 create mode 100644 internal/sdkprovider/userconfig/service/flink.go
 create mode 100644 internal/sdkprovider/userconfig/service/grafana.go
 create mode 100644 internal/sdkprovider/userconfig/service/influxdb.go
 create mode 100644 internal/sdkprovider/userconfig/service/kafka.go
 create mode 100644 internal/sdkprovider/userconfig/service/kafka_connect.go
 create mode 100644 internal/sdkprovider/userconfig/service/kafka_mirrormaker.go
 create mode 100644 internal/sdkprovider/userconfig/service/m3aggregator.go
 create mode 100644 internal/sdkprovider/userconfig/service/m3db.go
 create mode 100644 internal/sdkprovider/userconfig/service/mysql.go
 create mode 100644 internal/sdkprovider/userconfig/service/opensearch.go
 create mode 100644 internal/sdkprovider/userconfig/service/pg.go
 create mode 100644 internal/sdkprovider/userconfig/service/redis.go
 create mode 100644 internal/sdkprovider/userconfig/service/service.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7def09051..537034060 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,10 @@ nav_order: 1
 
 - Deprecating `project_user`, `account_team` and `account_team_member` resources
 
 ## [X.Y.Z] - YYYY-MM-DD
+
+- Add new user config generator
+- Use `TypeSet` for arrays with scalar values
+
+## [4.10.0] - YYYY-MM-DD
 
 - Fix unmarshalling empty userconfig crash
diff --git a/docs/data-sources/cassandra.md b/docs/data-sources/cassandra.md
index 2442962ab..957f0d950 100644
--- a/docs/data-sources/cassandra.md
+++ b/docs/data-sources/cassandra.md
@@ -68,14 +68,14 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `backup_hour` (Number)
 - `backup_minute` (Number)
 - `cassandra` (List of Object) (see [below for nested schema](#nestedobjatt--cassandra_user_config--cassandra))
 - `cassandra_version` (String)
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--cassandra_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `migrate_sstableloader` (Boolean)
 - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--cassandra_user_config--private_access))
 - `project_to_fork_from` (String)
diff --git a/docs/data-sources/clickhouse.md b/docs/data-sources/clickhouse.md
index 7b6e08d20..c25f6b9de 100644
--- a/docs/data-sources/clickhouse.md
+++ b/docs/data-sources/clickhouse.md
@@ -68,10 +68,10 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
-- `ip_filter` (List of String)
+- `additional_backup_regions` (Set of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_user_config--private_access))
 - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_user_config--privatelink_access))
 - `project_to_fork_from` (String)
diff --git a/docs/data-sources/flink.md b/docs/data-sources/flink.md
index 3cfb9e5d0..060ddd4d1 100644
--- a/docs/data-sources/flink.md
+++ b/docs/data-sources/flink.md
@@ -83,11 +83,11 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `flink_version` (String)
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--flink_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `number_of_task_slots` (Number)
 - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--flink_user_config--privatelink_access))
 - `static_ips` (Boolean)
diff --git a/docs/data-sources/grafana.md b/docs/data-sources/grafana.md
index 6e41e05d5..63f4567d7 100644
--- a/docs/data-sources/grafana.md
+++ b/docs/data-sources/grafana.md
@@ -82,7 +82,7 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `alerting_enabled` (Boolean)
 - `alerting_error_or_timeout` (String)
 - `alerting_max_annotations_to_keep` (Number)
@@ -106,9 +106,9 @@ Read-Only:
 - `editors_can_admin` (Boolean)
 - `external_image_storage` (List of Object) (see [below for nested schema](#nestedobjatt--grafana_user_config--external_image_storage))
 - `google_analytics_ua_id` (String)
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--grafana_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `metrics_enabled` (Boolean)
 - `oauth_allow_insecure_email_lookup` (Boolean)
 - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--grafana_user_config--private_access))
@@ -130,8 +130,8 @@ Read-Only:
 Read-Only:
 
 - `allow_sign_up` (Boolean)
-- `allowed_domains` (List of String)
-- `allowed_groups` (List of String)
+- `allowed_domains` (Set of String)
+- `allowed_groups` (Set of String)
 - `auth_url` (String)
 - `client_id` (String)
 - `client_secret` (String)
@@ -144,15 +144,15 @@ Read-Only:
 Read-Only:
 
 - `allow_sign_up` (Boolean)
-- `allowed_domains` (List of String)
-- `allowed_organizations` (List of String)
+- `allowed_domains` (Set of String)
+- `allowed_organizations` (Set of String)
 - `api_url` (String)
 - `auth_url` (String)
 - `auto_login` (Boolean)
 - `client_id` (String)
 - `client_secret` (String)
 - `name` (String)
-- `scopes` (List of String)
+- `scopes` (Set of String)
 - `token_url` (String)
 
 
@@ -162,10 +162,10 @@ Read-Only:
 Read-Only:
 
 - `allow_sign_up` (Boolean)
-- `allowed_organizations` (List of String)
+- `allowed_organizations` (Set of String)
 - `client_id` (String)
 - `client_secret` (String)
-- `team_ids` (List of Number)
+- `team_ids` (Set of Number)
 
 
 
@@ -174,7 +174,7 @@ Read-Only:
 Read-Only:
 
 - `allow_sign_up` (Boolean)
-- `allowed_groups` (List of String)
+- `allowed_groups` (Set of String)
 - `api_url` (String)
 - `auth_url` (String)
 - `client_id` (String)
@@ -188,7 +188,7 @@ Read-Only:
 Read-Only:
 
 - `allow_sign_up` (Boolean)
-- `allowed_domains` (List of String)
+- `allowed_domains` (Set of String)
 - `client_id` (String)
 - `client_secret` (String)
diff --git a/docs/data-sources/influxdb.md b/docs/data-sources/influxdb.md
index 7c198c34f..51f5bee5b 100644
--- a/docs/data-sources/influxdb.md
+++ b/docs/data-sources/influxdb.md
@@ -83,12 +83,12 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `custom_domain` (String)
 - `influxdb` (List of Object) (see [below for nested schema](#nestedobjatt--influxdb_user_config--influxdb))
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--influxdb_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--influxdb_user_config--private_access))
 - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--influxdb_user_config--privatelink_access))
 - `project_to_fork_from` (String)
diff --git a/docs/data-sources/kafka.md b/docs/data-sources/kafka.md
index f652b8a5b..96e2b922a 100644
--- a/docs/data-sources/kafka.md
+++ b/docs/data-sources/kafka.md
@@ -89,12 +89,12 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `aiven_kafka_topic_messages` (Boolean)
 - `custom_domain` (String)
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `kafka` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--kafka))
 - `kafka_authentication_methods` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--kafka_authentication_methods))
 - `kafka_connect` (Boolean)
diff --git a/docs/data-sources/kafka_connect.md b/docs/data-sources/kafka_connect.md
index 2d774a226..c59085ac0 100644
--- a/docs/data-sources/kafka_connect.md
+++ b/docs/data-sources/kafka_connect.md
@@ -82,10 +82,10 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
-- `ip_filter` (List of String)
+- `additional_backup_regions` (Set of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `kafka_connect` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--kafka_connect))
 - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--private_access))
 - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--privatelink_access))
diff --git a/docs/data-sources/kafka_mirrormaker.md b/docs/data-sources/kafka_mirrormaker.md
index 23fa4440f..4a8e3e804 100644
--- a/docs/data-sources/kafka_mirrormaker.md
+++ b/docs/data-sources/kafka_mirrormaker.md
@@ -82,10 +82,10 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
-- `ip_filter` (List of String)
+- `additional_backup_regions` (Set of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_mirrormaker_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `kafka_mirrormaker` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_mirrormaker_user_config--kafka_mirrormaker))
 - `static_ips` (Boolean)
diff --git a/docs/data-sources/m3aggregator.md b/docs/data-sources/m3aggregator.md
index 24e58377a..10f67db5a 100644
--- a/docs/data-sources/m3aggregator.md
+++ b/docs/data-sources/m3aggregator.md
@@ -39,7 +39,7 @@ data "aiven_m3aggregator" "m3a" {
 - `disk_space_used` (String) Disk space that service is currently using
 - `id` (String) The ID of this resource.
 - `m3aggregator` (List of Object) M3 aggregator specific server provided values (see [below for nested schema](#nestedatt--m3aggregator))
-- `m3aggregator_user_config` (List of Object) M3aggregator user configurable settings (see [below for nested schema](#nestedatt--m3aggregator_user_config))
+- `m3aggregator_user_config` (List of Object) M3Aggregator user configurable settings (see [below for nested schema](#nestedatt--m3aggregator_user_config))
 - `maintenance_window_dow` (String) Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
 - `maintenance_window_time` (String) Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
 - `plan` (String) Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan such as the new plan must have sufficient amount of disk space to store all current data and switching to a plan with fewer nodes might not be supported. The basic plan names are `hobbyist`, `startup-x`, `business-x` and `premium-x` where `x` is (roughly) the amount of memory on each node (also other attributes like number of CPUs and amount of disk space varies but naming is based on memory). The available options can be seen from the [Aiven pricing page](https://aiven.io/pricing).
@@ -83,9 +83,9 @@ Read-Only:
 
 Read-Only:
 
 - `custom_domain` (String)
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--m3aggregator_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `m3_version` (String)
 - `m3aggregator_version` (String)
 - `static_ips` (Boolean)
diff --git a/docs/data-sources/m3db.md b/docs/data-sources/m3db.md
index 2af4137fb..9fa01714d 100644
--- a/docs/data-sources/m3db.md
+++ b/docs/data-sources/m3db.md
@@ -39,7 +39,7 @@ data "aiven_m3db" "m3" {
 - `disk_space_used` (String) Disk space that service is currently using
 - `id` (String) The ID of this resource.
 - `m3db` (List of Object) M3 specific server provided values (see [below for nested schema](#nestedatt--m3db))
-- `m3db_user_config` (List of Object) M3db user configurable settings (see [below for nested schema](#nestedatt--m3db_user_config))
+- `m3db_user_config` (List of Object) M3Db user configurable settings (see [below for nested schema](#nestedatt--m3db_user_config))
 - `maintenance_window_dow` (String) Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
 - `maintenance_window_time` (String) Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
 - `plan` (String) Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan such as the new plan must have sufficient amount of disk space to store all current data and switching to a plan with fewer nodes might not be supported. The basic plan names are `hobbyist`, `startup-x`, `business-x` and `premium-x` where `x` is (roughly) the amount of memory on each node (also other attributes like number of CPUs and amount of disk space varies but naming is based on memory). The available options can be seen from the [Aiven pricing page](https://aiven.io/pricing).
@@ -82,11 +82,11 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `custom_domain` (String)
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `limits` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--limits))
 - `m3` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--m3))
 - `m3_version` (String)
@@ -200,13 +200,13 @@ Read-Only:
 
 Read-Only:
 
-- `aggregations` (List of String)
+- `aggregations` (Set of String)
 - `drop` (Boolean)
 - `filter` (String)
 - `name` (String)
-- `namespaces` (List of String)
+- `namespaces` (Set of String)
 - `namespaces_object` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--rules--mapping--namespaces_object))
-- `namespaces_string` (List of String)
+- `namespaces_string` (Set of String)
 - `tags` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--rules--mapping--tags))
diff --git a/docs/data-sources/mysql.md b/docs/data-sources/mysql.md
index 7bd9111ed..cbd472025 100644
--- a/docs/data-sources/mysql.md
+++ b/docs/data-sources/mysql.md
@@ -82,15 +82,15 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `admin_password` (String)
 - `admin_username` (String)
 - `backup_hour` (Number)
 - `backup_minute` (Number)
 - `binlog_retention_period` (Number)
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--mysql_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `migration` (List of Object) (see [below for nested schema](#nestedobjatt--mysql_user_config--migration))
 - `mysql` (List of Object) (see [below for nested schema](#nestedobjatt--mysql_user_config--mysql))
 - `mysql_version` (String)
diff --git a/docs/data-sources/opensearch.md b/docs/data-sources/opensearch.md
index e6255d194..e4f0e92fb 100644
--- a/docs/data-sources/opensearch.md
+++ b/docs/data-sources/opensearch.md
@@ -83,14 +83,14 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `custom_domain` (String)
 - `disable_replication_factor_adjustment` (Boolean)
 - `index_patterns` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--index_patterns))
 - `index_template` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--index_template))
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `keep_index_refresh_interval` (Boolean)
 - `max_index_count` (Number)
 - `openid` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--openid))
@@ -183,7 +183,7 @@ Read-Only:
 - `ism_history_rollover_check_period` (Number)
 - `ism_history_rollover_retention_period` (Number)
 - `override_main_response_version` (Boolean)
-- `reindex_remote_whitelist` (List of String)
+- `reindex_remote_whitelist` (Set of String)
 - `script_max_compilations_rate` (String)
 - `search_max_buckets` (Number)
 - `thread_pool_analyze_queue_size` (Number)
diff --git a/docs/data-sources/pg.md b/docs/data-sources/pg.md
index c0dfeadd1..78cb272d5 100644
--- a/docs/data-sources/pg.md
+++ b/docs/data-sources/pg.md
@@ -91,15 +91,15 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
+- `additional_backup_regions` (Set of String)
 - `admin_password` (String)
 - `admin_username` (String)
 - `backup_hour` (Number)
 - `backup_minute` (Number)
 - `enable_ipv6` (Boolean)
-- `ip_filter` (List of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--pg_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `migration` (List of Object) (see [below for nested schema](#nestedobjatt--pg_user_config--migration))
 - `pg` (List of Object) (see [below for nested schema](#nestedobjatt--pg_user_config--pg))
 - `pg_read_replica` (Boolean)
@@ -210,7 +210,7 @@ Read-Only:
 - `autodb_max_db_connections` (Number)
 - `autodb_pool_mode` (String)
 - `autodb_pool_size` (Number)
-- `ignore_startup_parameters` (List of String)
+- `ignore_startup_parameters` (Set of String)
 - `min_pool_size` (Number)
 - `server_idle_timeout` (Number)
 - `server_lifetime` (Number)
diff --git a/docs/data-sources/redis.md b/docs/data-sources/redis.md
index 79b7b3bc5..6bcb1d2f9 100644
--- a/docs/data-sources/redis.md
+++ b/docs/data-sources/redis.md
@@ -82,10 +82,10 @@ Read-Only:
 
 Read-Only:
 
-- `additional_backup_regions` (List of String)
-- `ip_filter` (List of String)
+- `additional_backup_regions` (Set of String)
+- `ip_filter` (Set of String)
 - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--redis_user_config--ip_filter_object))
-- `ip_filter_string` (List of String)
+- `ip_filter_string` (Set of String)
 - `migration` (List of Object) (see [below for nested schema](#nestedobjatt--redis_user_config--migration))
 - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--redis_user_config--private_access))
 - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--redis_user_config--privatelink_access))
diff --git a/docs/resources/cassandra.md b/docs/resources/cassandra.md
index 8e2bff40d..ebc70b355 100644
--- a/docs/resources/cassandra.md
+++ b/docs/resources/cassandra.md
@@ -77,18 +77,18 @@ resource "aiven_cassandra" "bar" {
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
+- `additional_backup_regions` (Set of String, Deprecated) Additional Cloud Regions for Backup Replication.
 - `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
 - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
-- `cassandra` (Block List, Max: 1) cassandra configuration values. (see [below for nested schema](#nestedblock--cassandra_user_config--cassandra))
+- `cassandra` (Block List, Max: 1) cassandra configuration values (see [below for nested schema](#nestedblock--cassandra_user_config--cassandra))
 - `cassandra_version` (String) Cassandra major version.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--cassandra_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--cassandra_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `migrate_sstableloader` (Boolean) Sets the service into migration mode enabling the sstableloader utility to be used to upload Cassandra data files. Available only on service create.
-- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--cassandra_user_config--private_access))
+- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--cassandra_user_config--private_access))
 - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created.
-- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--cassandra_user_config--public_access))
+- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--cassandra_user_config--public_access))
 - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created.
 - `service_to_join_with` (String) When bootstrapping, instead of creating a new Cassandra cluster try to join an existing one from another service. Can only be set on service creation.
 - `static_ips` (Boolean) Use static public IP addresses.
diff --git a/docs/resources/clickhouse.md b/docs/resources/clickhouse.md
index fad489322..cbab2ec64 100644
--- a/docs/resources/clickhouse.md
+++ b/docs/resources/clickhouse.md
@@ -69,14 +69,14 @@ resource "aiven_clickhouse" "clickhouse" {
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--clickhouse_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--clickhouse_user_config--private_access))
-- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--clickhouse_user_config--privatelink_access))
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication.
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--clickhouse_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--clickhouse_user_config--private_access))
+- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--clickhouse_user_config--privatelink_access))
 - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created.
-- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--clickhouse_user_config--public_access))
+- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--clickhouse_user_config--public_access))
 - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created.
 - `static_ips` (Boolean) Use static public IP addresses.
diff --git a/docs/resources/flink.md b/docs/resources/flink.md
index b0bf76415..6f570400d 100644
--- a/docs/resources/flink.md
+++ b/docs/resources/flink.md
@@ -81,13 +81,13 @@ Optional:
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication.
 - `flink_version` (String) Flink major version.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--flink_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--flink_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `number_of_task_slots` (Number) Task slots per node. For a 3 node plan, total number of task slots is 3x this value.
-- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--flink_user_config--privatelink_access))
+- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--flink_user_config--privatelink_access))
 - `static_ips` (Boolean) Use static public IP addresses.
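The `List of String` → `Set of String` changes above all trace back to the CHANGELOG entry "Use `TypeSet` for arrays with scalar values". As a minimal sketch of what a generated scalar-array field might look like after the switch — assuming the generator emits plain terraform-plugin-sdk schemas; the function name is illustrative, not taken from the generated files:

```go
package service

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// ipFilterSchema sketches a scalar-array user config field after the change:
// schema.TypeSet replaces schema.TypeList, so Terraform compares elements by
// set membership instead of position.
func ipFilterSchema() *schema.Schema {
	return &schema.Schema{
		Type:        schema.TypeSet, // was schema.TypeList
		Optional:    true,
		Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.",
		Elem:        &schema.Schema{Type: schema.TypeString},
	}
}
```

With a set, two configurations that list the same CIDR blocks in a different order are identical, so pure reordering no longer produces a plan diff.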
diff --git a/docs/resources/grafana.md b/docs/resources/grafana.md
index 53a6a2d77..8f91ff356 100644
--- a/docs/resources/grafana.md
+++ b/docs/resources/grafana.md
@@ -77,18 +77,18 @@ resource "aiven_grafana" "gr1" {
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication.
 - `alerting_enabled` (Boolean) Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
 - `alerting_error_or_timeout` (String) Default error or timeout setting for new alerting rules.
 - `alerting_max_annotations_to_keep` (Number) Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
 - `alerting_nodata_or_nullvalues` (String) Default value for 'no data or null values' for new alerting rules.
 - `allow_embedding` (Boolean) Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
-- `auth_azuread` (Block List, Max: 1) Azure AD OAuth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_azuread))
+- `auth_azuread` (Block List, Max: 1) Azure AD OAuth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_azuread))
 - `auth_basic_enabled` (Boolean) Enable or disable basic authentication form, used by Grafana built-in login.
-- `auth_generic_oauth` (Block List, Max: 1) Generic OAuth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_generic_oauth))
-- `auth_github` (Block List, Max: 1) Github Auth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_github))
-- `auth_gitlab` (Block List, Max: 1) GitLab Auth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_gitlab))
-- `auth_google` (Block List, Max: 1) Google Auth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_google))
+- `auth_generic_oauth` (Block List, Max: 1) Generic OAuth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_generic_oauth))
+- `auth_github` (Block List, Max: 1) Github Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_github))
+- `auth_gitlab` (Block List, Max: 1) GitLab Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_gitlab))
+- `auth_google` (Block List, Max: 1) Google Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_google))
 - `cookie_samesite` (String) Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
 - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
 - `dashboard_previews_enabled` (Boolean) This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
@@ -96,23 +96,23 @@ Optional:
 - `dashboards_versions_to_keep` (Number) Dashboard versions to keep per dashboard.
 - `dataproxy_send_user_header` (Boolean) Send 'X-Grafana-User' header to data source.
 - `dataproxy_timeout` (Number) Timeout for data proxy requests in seconds.
-- `date_formats` (Block List, Max: 1) Grafana date format specifications. (see [below for nested schema](#nestedblock--grafana_user_config--date_formats))
+- `date_formats` (Block List, Max: 1) Grafana date format specifications (see [below for nested schema](#nestedblock--grafana_user_config--date_formats))
 - `disable_gravatar` (Boolean) Set to true to disable gravatar. Defaults to false (gravatar is enabled).
 - `editors_can_admin` (Boolean) Editors can manage folders, teams and dashboards created by them.
-- `external_image_storage` (Block List, Max: 1) External image store settings. (see [below for nested schema](#nestedblock--grafana_user_config--external_image_storage))
+- `external_image_storage` (Block List, Max: 1) External image store settings (see [below for nested schema](#nestedblock--grafana_user_config--external_image_storage))
 - `google_analytics_ua_id` (String) Google Analytics ID.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--grafana_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--grafana_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `metrics_enabled` (Boolean) Enable Grafana /metrics endpoint.
 - `oauth_allow_insecure_email_lookup` (Boolean) Enforce user lookup based on email instead of the unique ID provided by the IdP.
-- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--grafana_user_config--private_access))
-- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--grafana_user_config--privatelink_access))
+- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--grafana_user_config--private_access))
+- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--grafana_user_config--privatelink_access))
 - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created.
-- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--grafana_user_config--public_access))
+- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--grafana_user_config--public_access))
 - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service.
 - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created.
-- `smtp_server` (Block List, Max: 1) SMTP server settings. (see [below for nested schema](#nestedblock--grafana_user_config--smtp_server))
+- `smtp_server` (Block List, Max: 1) SMTP server settings (see [below for nested schema](#nestedblock--grafana_user_config--smtp_server))
 - `static_ips` (Boolean) Use static public IP addresses.
 - `unified_alerting_enabled` (Boolean) Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
 - `user_auto_assign_org` (Boolean) Auto-assign new users on signup to main organization. Defaults to false.
@@ -132,8 +132,8 @@ Required:
 
 Optional:
 
 - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in.
-- `allowed_domains` (List of String) Allowed domains.
-- `allowed_groups` (List of String) Require users to belong to one of given groups.
+- `allowed_domains` (Set of String) Allowed domains.
+- `allowed_groups` (Set of String) Require users to belong to one of given groups.
@@ -150,11 +150,11 @@ Required:
 
 Optional:
 
 - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in.
-- `allowed_domains` (List of String) Allowed domains.
-- `allowed_organizations` (List of String) Require user to be member of one of the listed organizations.
+- `allowed_domains` (Set of String) Allowed domains.
+- `allowed_organizations` (Set of String) Require user to be member of one of the listed organizations.
 - `auto_login` (Boolean) Allow users to bypass the login screen and automatically log in.
 - `name` (String) Name of the OAuth integration.
-- `scopes` (List of String) OAuth scopes.
+- `scopes` (Set of String) OAuth scopes.
@@ -168,8 +168,8 @@ Required:
 
 Optional:
 
 - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in.
-- `allowed_organizations` (List of String) Require users to belong to one of given organizations.
-- `team_ids` (List of Number) Require users to belong to one of given team IDs.
+- `allowed_organizations` (Set of String) Require users to belong to one of given organizations.
+- `team_ids` (Set of Number) Require users to belong to one of given team IDs.
@@ -177,13 +177,13 @@ Optional:
 
 Required:
 
+- `allowed_groups` (Set of String) Require users to belong to one of given groups.
 - `client_id` (String) Client ID from provider.
 - `client_secret` (String) Client secret from provider.
 
 Optional:
 
 - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in.
-- `allowed_groups` (List of String) Require users to belong to one of given groups.
 - `api_url` (String) API URL. This only needs to be set when using self hosted GitLab.
 - `auth_url` (String) Authorization URL. This only needs to be set when using self hosted GitLab.
 - `token_url` (String) Token URL. This only needs to be set when using self hosted GitLab.
@@ -194,13 +194,13 @@ Optional:
 
 Required:
 
+- `allowed_domains` (Set of String) Domains allowed to sign-in to this Grafana.
 - `client_id` (String) Client ID from provider.
 - `client_secret` (String) Client secret from provider.
 
 Optional:
 
 - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in.
-- `allowed_domains` (List of String) Domains allowed to sign-in to this Grafana.
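Besides the type changes, the grafana hunks above move `allowed_groups` (auth_gitlab) and `allowed_domains` (auth_google) from Optional to Required. In plugin-sdk terms that is a flag flip on the nested field; a hedged sketch, with the surrounding block layout assumed rather than copied from the generated grafana.go:

```go
package service

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// authGitlabFields sketches the nested auth_gitlab fields after the change:
// allowed_groups is now Required, matching the service's user config spec.
var authGitlabFields = map[string]*schema.Schema{
	"allowed_groups": {
		Type:     schema.TypeSet,
		Required: true, // previously Optional
		Elem:     &schema.Schema{Type: schema.TypeString},
	},
	"client_id":     {Type: schema.TypeString, Required: true},
	"client_secret": {Type: schema.TypeString, Required: true},
}
```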
diff --git a/docs/resources/influxdb.md b/docs/resources/influxdb.md
index acb8072cb..bb970489b 100644
--- a/docs/resources/influxdb.md
+++ b/docs/resources/influxdb.md
@@ -75,16 +75,16 @@ resource "aiven_influxdb" "inf1" {
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication.
 - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
-- `influxdb` (Block List, Max: 1) influxdb.conf configuration values. (see [below for nested schema](#nestedblock--influxdb_user_config--influxdb))
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--influxdb_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--influxdb_user_config--private_access))
-- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--influxdb_user_config--privatelink_access))
+- `influxdb` (Block List, Max: 1) influxdb.conf configuration values (see [below for nested schema](#nestedblock--influxdb_user_config--influxdb))
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--influxdb_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--influxdb_user_config--private_access))
+- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--influxdb_user_config--privatelink_access))
 - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created.
-- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--influxdb_user_config--public_access))
+- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--influxdb_user_config--public_access))
 - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service.
 - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created.
 - `static_ips` (Boolean) Use static public IP addresses.
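The new `internal/sdkprovider/userconfig/converters` package listed in the diffstat is what turns these set-typed values back into plain slices for the API payload. A sketch of that direction, under the assumption that conversion operates on `*schema.Set` values; the helper name is hypothetical:

```go
package converters

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// expandStringSet flattens a TypeSet value read from resource data into the
// []string shape the Aiven API payload expects.
func expandStringSet(v interface{}) []string {
	set, ok := v.(*schema.Set)
	if !ok || set == nil {
		return nil
	}
	out := make([]string, 0, set.Len())
	for _, item := range set.List() {
		out = append(out, item.(string))
	}
	return out
}
```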
diff --git a/docs/resources/kafka.md b/docs/resources/kafka.md
index 9ffd767d1..d9786b718 100644
--- a/docs/resources/kafka.md
+++ b/docs/resources/kafka.md
@@ -88,27 +88,27 @@ resource "aiven_kafka" "kafka1" {
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication.
 - `aiven_kafka_topic_messages` (Boolean) Allow access to read Kafka topic messages in the Aiven Console and REST API.
 - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--kafka_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `kafka` (Block List, Max: 1) Kafka broker configuration values. (see [below for nested schema](#nestedblock--kafka_user_config--kafka))
-- `kafka_authentication_methods` (Block List, Max: 1) Kafka authentication methods. (see [below for nested schema](#nestedblock--kafka_user_config--kafka_authentication_methods))
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--kafka_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `kafka` (Block List, Max: 1) Kafka broker configuration values (see [below for nested schema](#nestedblock--kafka_user_config--kafka))
+- `kafka_authentication_methods` (Block List, Max: 1) Kafka authentication methods (see [below for nested schema](#nestedblock--kafka_user_config--kafka_authentication_methods))
 - `kafka_connect` (Boolean) Enable Kafka Connect service. The default value is `false`.
-- `kafka_connect_config` (Block List, Max: 1) Kafka Connect configuration values. (see [below for nested schema](#nestedblock--kafka_user_config--kafka_connect_config))
+- `kafka_connect_config` (Block List, Max: 1) Kafka Connect configuration values (see [below for nested schema](#nestedblock--kafka_user_config--kafka_connect_config))
 - `kafka_rest` (Boolean) Enable Kafka-REST service. The default value is `false`.
 - `kafka_rest_authorization` (Boolean) Enable authorization in Kafka-REST service.
-- `kafka_rest_config` (Block List, Max: 1) Kafka REST configuration. (see [below for nested schema](#nestedblock--kafka_user_config--kafka_rest_config))
+- `kafka_rest_config` (Block List, Max: 1) Kafka REST configuration (see [below for nested schema](#nestedblock--kafka_user_config--kafka_rest_config))
 - `kafka_version` (String) Kafka major version.
-- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--kafka_user_config--private_access))
-- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--kafka_user_config--privatelink_access))
-- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--kafka_user_config--public_access))
+- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--kafka_user_config--private_access))
+- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--kafka_user_config--privatelink_access))
+- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--kafka_user_config--public_access))
 - `schema_registry` (Boolean) Enable Schema-Registry service. The default value is `false`.
-- `schema_registry_config` (Block List, Max: 1) Schema Registry configuration. (see [below for nested schema](#nestedblock--kafka_user_config--schema_registry_config))
+- `schema_registry_config` (Block List, Max: 1) Schema Registry configuration (see [below for nested schema](#nestedblock--kafka_user_config--schema_registry_config))
 - `static_ips` (Boolean) Use static public IP addresses.
-- `tiered_storage` (Block List, Max: 1) Tiered storage configuration. (see [below for nested schema](#nestedblock--kafka_user_config--tiered_storage))
+- `tiered_storage` (Block List, Max: 1) Tiered storage configuration (see [below for nested schema](#nestedblock--kafka_user_config--tiered_storage))
 
 
 ### Nested Schema for `kafka_user_config.ip_filter_object`
@@ -134,7 +134,7 @@ Optional:
 - `group_initial_rebalance_delay_ms` (Number) The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
 - `group_max_session_timeout_ms` (Number) The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
 - `group_min_session_timeout_ms` (Number) The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
-- `log_cleaner_delete_retention_ms` (Number) How long are delete records retained?.
+- `log_cleaner_delete_retention_ms` (Number) How long are delete records retained?
 - `log_cleaner_max_compaction_lag_ms` (Number) The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
 - `log_cleaner_min_cleanable_ratio` (Number) Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
 - `log_cleaner_min_compaction_lag_ms` (Number) The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
@@ -148,7 +148,7 @@ Optional:
 - `log_message_downconversion_enable` (Boolean) This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
 - `log_message_timestamp_difference_max_ms` (Number) The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
 - `log_message_timestamp_type` (String) Define whether the timestamp in the message is message create time or log append time.
-- `log_preallocate` (Boolean) Should pre allocate file when create new segment?.
+- `log_preallocate` (Boolean) Should pre allocate file when create new segment?
 - `log_retention_bytes` (Number) The maximum size of the log before deleting messages.
 - `log_retention_hours` (Number) The number of hours to keep a log file before deleting it.
 - `log_retention_ms` (Number) The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
@@ -273,14 +273,14 @@ Optional:
 
 Optional:
 
 - `enabled` (Boolean) Whether to enable the tiered storage functionality.
-- `local_cache` (Block List, Max: 1) Local cache configuration. (see [below for nested schema](#nestedblock--kafka_user_config--tiered_storage--local_cache))
+- `local_cache` (Block List, Max: 1, Deprecated) Local cache configuration (see [below for nested schema](#nestedblock--kafka_user_config--tiered_storage--local_cache))
 
 
 ### Nested Schema for `kafka_user_config.tiered_storage.local_cache`
 
 Optional:
 
-- `size` (Number) Local cache size in bytes.
+- `size` (Number, Deprecated) Local cache size in bytes.
diff --git a/docs/resources/kafka_connect.md b/docs/resources/kafka_connect.md
index f1181fc5d..4cd5a484f 100644
--- a/docs/resources/kafka_connect.md
+++ b/docs/resources/kafka_connect.md
@@ -79,14 +79,14 @@ resource "aiven_kafka_connect" "kc1" {
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--kafka_connect_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `kafka_connect` (Block List, Max: 1) Kafka Connect configuration values. (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect))
-- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--kafka_connect_user_config--private_access))
-- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--kafka_connect_user_config--privatelink_access))
-- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--kafka_connect_user_config--public_access))
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication.
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--kafka_connect_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `kafka_connect` (Block List, Max: 1) Kafka Connect configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect))
+- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--kafka_connect_user_config--private_access))
+- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--kafka_connect_user_config--privatelink_access))
+- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--kafka_connect_user_config--public_access))
 - `static_ips` (Boolean) Use static public IP addresses.
diff --git a/docs/resources/kafka_mirrormaker.md b/docs/resources/kafka_mirrormaker.md
index 70be653a0..231dcf707 100644
--- a/docs/resources/kafka_mirrormaker.md
+++ b/docs/resources/kafka_mirrormaker.md
@@ -77,11 +77,11 @@ resource "aiven_kafka_mirrormaker" "mm1" {
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values. (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker))
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication.
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker))
 - `static_ips` (Boolean) Use static public IP addresses.
diff --git a/docs/resources/m3aggregator.md b/docs/resources/m3aggregator.md
index 79c0cffc4..a5307b3ee 100644
--- a/docs/resources/m3aggregator.md
+++ b/docs/resources/m3aggregator.md
@@ -41,7 +41,7 @@ resource "aiven_m3aggregator" "m3a" {
 - `additional_disk_space` (String) Additional disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing it will result in the service rebalancing.
 - `cloud_name` (String) Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created. Changing the value will trigger a potentially lengthy migration process for the service. Format is cloud provider name (`aws`, `azure`, `do`, `google`, `upcloud`, etc.), dash, and the cloud provider specific region name. These are documented on each Cloud provider's own support articles, like [here for Google](https://cloud.google.com/compute/docs/regions-zones/) and [here for AWS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).
 - `disk_space` (String, Deprecated) Service disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing it will result in the service rebalancing.
-- `m3aggregator_user_config` (Block List, Max: 1) M3aggregator user configurable settings (see [below for nested schema](#nestedblock--m3aggregator_user_config))
+- `m3aggregator_user_config` (Block List, Max: 1) M3Aggregator user configurable settings (see [below for nested schema](#nestedblock--m3aggregator_user_config))
 - `maintenance_window_dow` (String) Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
 - `maintenance_window_time` (String) Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
 - `project_vpc_id` (String) Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
@@ -74,10 +74,10 @@ resource "aiven_m3aggregator" "m3a" {
 
 Optional:
 
 - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--m3aggregator_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `m3_version` (String, Deprecated) M3 major version (deprecated, use m3aggregator_version).
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--m3aggregator_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `m3_version` (String) M3 major version (deprecated, use m3aggregator_version).
 - `m3aggregator_version` (String) M3 major version (the minimum compatible version).
 - `static_ips` (Boolean) Use static public IP addresses.
diff --git a/docs/resources/m3db.md b/docs/resources/m3db.md
index 7f2c1ff40..56feece76 100644
--- a/docs/resources/m3db.md
+++ b/docs/resources/m3db.md
@@ -46,7 +46,7 @@ resource "aiven_m3db" "m3" {
 - `additional_disk_space` (String) Additional disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing it will result in the service rebalancing.
 - `cloud_name` (String) Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created. Changing the value will trigger a potentially lengthy migration process for the service. Format is cloud provider name (`aws`, `azure`, `do`, `google`, `upcloud`, etc.), dash, and the cloud provider specific region name. These are documented on each Cloud provider's own support articles, like [here for Google](https://cloud.google.com/compute/docs/regions-zones/) and [here for AWS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).
 - `disk_space` (String, Deprecated) Service disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing it will result in the service rebalancing.
-- `m3db_user_config` (Block List, Max: 1) M3db user configurable settings (see [below for nested schema](#nestedblock--m3db_user_config))
+- `m3db_user_config` (Block List, Max: 1) M3Db user configurable settings (see [below for nested schema](#nestedblock--m3db_user_config))
 - `maintenance_window_dow` (String) Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
 - `maintenance_window_time` (String) Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
 - `project_vpc_id` (String) Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
@@ -78,21 +78,21 @@ resource "aiven_m3db" "m3" {
 
 Optional:
 
-- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication.
 - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
-- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--m3db_user_config--ip_filter_object))
-- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `limits` (Block List, Max: 1) M3 limits. (see [below for nested schema](#nestedblock--m3db_user_config--limits))
-- `m3` (Block List, Max: 1) M3 specific configuration options. (see [below for nested schema](#nestedblock--m3db_user_config--m3))
-- `m3_version` (String, Deprecated) M3 major version (deprecated, use m3db_version).
+- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--m3db_user_config--ip_filter_object))
+- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
+- `limits` (Block List, Max: 1) M3 limits (see [below for nested schema](#nestedblock--m3db_user_config--limits)) +- `m3` (Block List, Max: 1) M3 specific configuration options (see [below for nested schema](#nestedblock--m3db_user_config--m3)) +- `m3_version` (String) M3 major version (deprecated, use m3db_version). - `m3coordinator_enable_graphite_carbon_ingest` (Boolean) Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only. - `m3db_version` (String) M3 major version (the minimum compatible version). -- `namespaces` (Block List, Max: 2147483647) List of M3 namespaces. (see [below for nested schema](#nestedblock--m3db_user_config--namespaces)) -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--m3db_user_config--private_access)) +- `namespaces` (Block List, Max: 2147483647) List of M3 namespaces (see [below for nested schema](#nestedblock--m3db_user_config--namespaces)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--m3db_user_config--private_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--m3db_user_config--public_access)) -- `rules` (Block List, Max: 1) M3 rules. (see [below for nested schema](#nestedblock--m3db_user_config--rules)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--m3db_user_config--public_access)) +- `rules` (Block List, Max: 1) M3 rules (see [below for nested schema](#nestedblock--m3db_user_config--rules)) - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `static_ips` (Boolean) Use static public IP addresses. @@ -126,7 +126,7 @@ Optional: Optional: -- `tag_options` (Block List, Max: 1) M3 Tag Options. (see [below for nested schema](#nestedblock--m3db_user_config--m3--tag_options)) +- `tag_options` (Block List, Max: 1) M3 Tag Options (see [below for nested schema](#nestedblock--m3db_user_config--m3--tag_options)) ### Nested Schema for `m3db_user_config.m3.tag_options` @@ -148,15 +148,18 @@ Required: Optional: -- `options` (Block List, Max: 1) Namespace options. (see [below for nested schema](#nestedblock--m3db_user_config--namespaces--options)) +- `options` (Block List, Max: 1) Namespace options (see [below for nested schema](#nestedblock--m3db_user_config--namespaces--options)) - `resolution` (String) The resolution for an aggregated namespace. ### Nested Schema for `m3db_user_config.namespaces.options` +Required: + +- `retention_options` (Block List, Min: 1, Max: 1) Retention options (see [below for nested schema](#nestedblock--m3db_user_config--namespaces--options--retention_options)) + Optional: -- `retention_options` (Block List, Max: 1) Retention options. (see [below for nested schema](#nestedblock--m3db_user_config--namespaces--options--retention_options)) - `snapshot_enabled` (Boolean) Controls whether M3DB will create snapshot files for this namespace. 
- `writes_to_commitlog` (Boolean) Controls whether M3DB will include writes to this namespace in the commitlog. @@ -195,7 +198,7 @@ Optional: Optional: -- `mapping` (Block List, Max: 10) List of M3 mapping rules. (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping)) +- `mapping` (Block List, Max: 10) List of M3 mapping rules (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping)) ### Nested Schema for `m3db_user_config.rules.mapping` @@ -206,20 +209,23 @@ Required: Optional: -- `aggregations` (List of String) List of aggregations to be applied. +- `aggregations` (Set of String) List of aggregations to be applied. - `drop` (Boolean) Only store the derived metric (as specified in the roll-up rules), if any. - `name` (String) The (optional) name of the rule. -- `namespaces` (List of String, Deprecated) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards). -- `namespaces_object` (Block List, Max: 10) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by exact match of retention period and resolution. (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping--namespaces_object)) -- `namespaces_string` (List of String) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards). -- `tags` (Block List, Max: 10) List of tags to be appended to matching metrics. (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping--tags)) +- `namespaces` (Set of String, Deprecated) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards). +- `namespaces_object` (Block List, Max: 10) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by exact match of retention period and resolution (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping--namespaces_object)) +- `namespaces_string` (Set of String) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards). 
+- `tags` (Block List, Max: 10) List of tags to be appended to matching metrics (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping--tags)) ### Nested Schema for `m3db_user_config.rules.mapping.namespaces_object` -Optional: +Required: - `resolution` (String) The resolution for the matching namespace. + +Optional: + - `retention` (String) The retention period of the matching namespace. diff --git a/docs/resources/mysql.md b/docs/resources/mysql.md index 17f36a8a0..e3b160452 100644 --- a/docs/resources/mysql.md +++ b/docs/resources/mysql.md @@ -82,22 +82,22 @@ resource "aiven_mysql" "mysql1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `admin_password` (String, Sensitive) Custom password for admin user. Defaults to random string. This must be set only when a new service is being created. - `admin_username` (String) Custom username for admin user. This must be set only when a new service is being created. - `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. - `binlog_retention_period` (Number) The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--mysql_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `migration` (Block List, Max: 1) Migrate data from existing server. (see [below for nested schema](#nestedblock--mysql_user_config--migration)) -- `mysql` (Block List, Max: 1) mysql.conf configuration values. (see [below for nested schema](#nestedblock--mysql_user_config--mysql)) +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--mysql_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `migration` (Block List, Max: 1) Migrate data from existing server (see [below for nested schema](#nestedblock--mysql_user_config--migration)) +- `mysql` (Block List, Max: 1) mysql.conf configuration values (see [below for nested schema](#nestedblock--mysql_user_config--mysql)) - `mysql_version` (String) MySQL major version. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--mysql_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. 
(see [below for nested schema](#nestedblock--mysql_user_config--privatelink_access)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--mysql_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--mysql_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--mysql_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--mysql_user_config--public_access)) - `recovery_target_time` (String) Recovery target time when forking a service. This has effect only when a new service is being created. - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `static_ips` (Boolean) Use static public IP addresses. diff --git a/docs/resources/opensearch.md b/docs/resources/opensearch.md index df16cf534..d588d7375 100644 --- a/docs/resources/opensearch.md +++ b/docs/resources/opensearch.md @@ -83,26 +83,26 @@ resource "aiven_opensearch" "os1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. -- `disable_replication_factor_adjustment` (Boolean, Deprecated) Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated. -- `index_patterns` (Block List, Max: 512) Index patterns. (see [below for nested schema](#nestedblock--opensearch_user_config--index_patterns)) -- `index_template` (Block List, Max: 1) Template settings for all new indexes. (see [below for nested schema](#nestedblock--opensearch_user_config--index_template)) -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--opensearch_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `disable_replication_factor_adjustment` (Boolean) Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated. 
+- `index_patterns` (Block List, Max: 512) Index patterns (see [below for nested schema](#nestedblock--opensearch_user_config--index_patterns)) +- `index_template` (Block List, Max: 1) Template settings for all new indexes (see [below for nested schema](#nestedblock--opensearch_user_config--index_template)) +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--opensearch_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. - `keep_index_refresh_interval` (Boolean) Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true. -- `max_index_count` (Number, Deprecated) Use index_patterns instead. The default value is `0`. -- `openid` (Block List, Max: 1) OpenSearch OpenID Connect Configuration. (see [below for nested schema](#nestedblock--opensearch_user_config--openid)) -- `opensearch` (Block List, Max: 1) OpenSearch settings. (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch)) -- `opensearch_dashboards` (Block List, Max: 1) OpenSearch Dashboards settings. (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch_dashboards)) +- `max_index_count` (Number) use index_patterns instead. The default value is `0`. +- `openid` (Block List, Max: 1) OpenSearch OpenID Connect Configuration (see [below for nested schema](#nestedblock--opensearch_user_config--openid)) +- `opensearch` (Block List, Max: 1) OpenSearch settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch)) +- `opensearch_dashboards` (Block List, Max: 1) OpenSearch Dashboards settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch_dashboards)) - `opensearch_version` (String) OpenSearch major version. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--opensearch_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--opensearch_user_config--privatelink_access)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--opensearch_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--opensearch_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--opensearch_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--opensearch_user_config--public_access)) - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service. 
-- `saml` (Block List, Max: 1) OpenSearch SAML configuration. (see [below for nested schema](#nestedblock--opensearch_user_config--saml)) +- `saml` (Block List, Max: 1) OpenSearch SAML configuration (see [below for nested schema](#nestedblock--opensearch_user_config--saml)) - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `static_ips` (Boolean) Use static public IP addresses. @@ -170,10 +170,10 @@ Optional: - `action_auto_create_index_enabled` (Boolean) Explicitly allow or block automatic creation of indices. Defaults to true. - `action_destructive_requires_name` (Boolean) Require explicit index names when deleting. -- `auth_failure_listeners` (Block List, Max: 1) Opensearch Security Plugin Settings. (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners)) +- `auth_failure_listeners` (Block List, Max: 1) Opensearch Security Plugin Settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners)) - `cluster_max_shards_per_node` (Number) Controls the number of shards allowed in the cluster per data node. - `cluster_routing_allocation_node_concurrent_recoveries` (Number) How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2. -- `email_sender_name` (String) This should be identical to the Sender name defined in Opensearch dashboards. +- `email_sender_name` (String) Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore. - `email_sender_password` (String, Sensitive) Sender password for Opensearch alerts to authenticate with SMTP server. - `email_sender_username` (String) Sender username for Opensearch alerts. - `http_max_content_length` (Number) Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes. @@ -192,7 +192,7 @@ Optional: - `ism_history_rollover_check_period` (Number) The time between rollover checks for the audit history index in hours. The default value is `8`. - `ism_history_rollover_retention_period` (Number) How long audit history indices are kept in days. The default value is `30`. - `override_main_response_version` (Boolean) Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false. -- `reindex_remote_whitelist` (List of String) Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart. +- `reindex_remote_whitelist` (Set of String) Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart. - `script_max_compilations_rate` (String) Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context. - `search_max_buckets` (Number) Maximum number of aggregation buckets allowed in a single response. OpenSearch default value is used when this is not defined. - `thread_pool_analyze_queue_size` (Number) Size for the thread pool queue. See documentation for exact details. @@ -212,8 +212,8 @@ Optional: Optional: -- `internal_authentication_backend_limiting` (Block List, Max: 1) . (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners--internal_authentication_backend_limiting)) -- `ip_rate_limiting` (Block List, Max: 1) IP address rate limiting settings. 
(see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners--ip_rate_limiting)) +- `internal_authentication_backend_limiting` (Block List, Max: 1) (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners--internal_authentication_backend_limiting)) +- `ip_rate_limiting` (Block List, Max: 1) IP address rate limiting settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners--ip_rate_limiting)) ### Nested Schema for `opensearch_user_config.opensearch.auth_failure_listeners.internal_authentication_backend_limiting` @@ -221,12 +221,12 @@ Optional: Optional: - `allowed_tries` (Number) The number of login attempts allowed before login is blocked. -- `authentication_backend` (String) The internal backend. Enter `internal`. +- `authentication_backend` (String) internal_authentication_backend_limiting.authentication_backend. - `block_expiry_seconds` (Number) The duration of time that login remains blocked after a failed login. -- `max_blocked_clients` (Number) The maximum number of blocked IP addresses. +- `max_blocked_clients` (Number) internal_authentication_backend_limiting.max_blocked_clients. - `max_tracked_clients` (Number) The maximum number of tracked IP addresses that have failed login. - `time_window_seconds` (Number) The window of time in which the value for `allowed_tries` is enforced. -- `type` (String) The type of rate limiting. +- `type` (String) internal_authentication_backend_limiting.type. diff --git a/docs/resources/pg.md b/docs/resources/pg.md index b4802935a..40f8347aa 100644 --- a/docs/resources/pg.md +++ b/docs/resources/pg.md @@ -115,33 +115,33 @@ Read-Only: Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `admin_password` (String, Sensitive) Custom password for admin user. Defaults to random string. This must be set only when a new service is being created. - `admin_username` (String) Custom username for admin user. This must be set only when a new service is being created. - `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. - `enable_ipv6` (Boolean) Register AAAA DNS records for the service, and allow IPv6 packets to service ports. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--pg_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `migration` (Block List, Max: 1) Migrate data from existing server. (see [below for nested schema](#nestedblock--pg_user_config--migration)) -- `pg` (Block List, Max: 1) postgresql.conf configuration values. (see [below for nested schema](#nestedblock--pg_user_config--pg)) -- `pg_read_replica` (Boolean, Deprecated) Use read_replica service integration instead. 
-- `pg_service_to_fork_from` (String, Deprecated) Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--pg_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `migration` (Block List, Max: 1) Migrate data from existing server (see [below for nested schema](#nestedblock--pg_user_config--migration)) +- `pg` (Block List, Max: 1) postgresql.conf configuration values (see [below for nested schema](#nestedblock--pg_user_config--pg)) +- `pg_read_replica` (Boolean) Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead). +- `pg_service_to_fork_from` (String) Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created. - `pg_stat_monitor_enable` (Boolean) Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`. - `pg_version` (String) PostgreSQL major version. -- `pgbouncer` (Block List, Max: 1) PGBouncer connection pooling settings. (see [below for nested schema](#nestedblock--pg_user_config--pgbouncer)) -- `pglookout` (Block List, Max: 1) PGLookout settings. (see [below for nested schema](#nestedblock--pg_user_config--pglookout)) -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--pg_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--pg_user_config--privatelink_access)) +- `pgbouncer` (Block List, Max: 1) PGBouncer connection pooling settings (see [below for nested schema](#nestedblock--pg_user_config--pgbouncer)) +- `pglookout` (Block List, Max: 1) PGLookout settings (see [below for nested schema](#nestedblock--pg_user_config--pglookout)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--pg_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--pg_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--pg_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--pg_user_config--public_access)) - `recovery_target_time` (String) Recovery target time when forking a service. This has effect only when a new service is being created. - `service_to_fork_from` (String) Name of another service to fork from. 
This has effect only when a new service is being created. - `shared_buffers_percentage` (Number) Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. - `static_ips` (Boolean) Use static public IP addresses. - `synchronous_replication` (String) Synchronous replication type. Note that the service plan also needs to support synchronous replication. -- `timescaledb` (Block List, Max: 1) TimescaleDB extension configuration values. (see [below for nested schema](#nestedblock--pg_user_config--timescaledb)) +- `timescaledb` (Block List, Max: 1) TimescaleDB extension configuration values (see [below for nested schema](#nestedblock--pg_user_config--timescaledb)) - `variant` (String) Variant of the PostgreSQL service, may affect the features that are exposed by default. - `work_mem` (Number) Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). @@ -240,7 +240,7 @@ Optional: - `autodb_max_db_connections` (Number) Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. - `autodb_pool_mode` (String) PGBouncer pool mode. - `autodb_pool_size` (Number) If non-zero then create automatically a pool of that size per user when a pool doesn't exist. -- `ignore_startup_parameters` (List of String) List of parameters to ignore when given in startup packet. +- `ignore_startup_parameters` (Set of String) List of parameters to ignore when given in startup packet. - `min_pool_size` (Number) Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. - `server_idle_timeout` (Number) If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). - `server_lifetime` (Number) The pooler will close an unused server connection that has been connected longer than this. (seconds). diff --git a/docs/resources/redis.md b/docs/resources/redis.md index ae49a79e7..f92ad9bb4 100644 --- a/docs/resources/redis.md +++ b/docs/resources/redis.md @@ -77,15 +77,15 @@ resource "aiven_redis" "redis1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--redis_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `migration` (Block List, Max: 1) Migrate data from existing server. (see [below for nested schema](#nestedblock--redis_user_config--migration)) -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--redis_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. 
(see [below for nested schema](#nestedblock--redis_user_config--privatelink_access)) +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--redis_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `migration` (Block List, Max: 1) Migrate data from existing server (see [below for nested schema](#nestedblock--redis_user_config--migration)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--redis_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--redis_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--redis_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--redis_user_config--public_access)) - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service. - `redis_acl_channels_default` (String) Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default. - `redis_io_threads` (Number) Set Redis IO thread count. Changing this will cause a restart of the Redis service. 
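Every `(List of String)` → `(Set of String)` change in the docs diffs above reflects the same underlying schema change: scalar-valued arrays such as `ip_filter` are now declared as `TypeSet` instead of `TypeList`. A minimal sketch of what that means at the terraform-plugin-sdk level (the field name and surrounding wiring here are illustrative, not the generator's actual output):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// ipFilterSchema sketches the TypeList -> TypeSet switch for a scalar string
// array such as `ip_filter`. A TypeSet is unordered and compares elements by
// value, so reordering entries in the config no longer produces a plan diff,
// while adding or removing an entry still does.
func ipFilterSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeSet, // was schema.TypeList before this change
		Optional: true,
		Elem:     &schema.Schema{Type: schema.TypeString},
	}
}

func main() {
	fmt.Println(ipFilterSchema().Type) // prints: TypeSet
}
```

This is also why the Grafana acceptance test added below checks that changing the number of set items is not swallowed by the diff suppressor.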
diff --git a/go.mod b/go.mod index 60db78aea..543a78869 100644 --- a/go.mod +++ b/go.mod @@ -15,9 +15,9 @@ require ( github.com/hashicorp/terraform-plugin-go v0.19.0 github.com/hashicorp/terraform-plugin-mux v0.12.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 + github.com/iancoleman/strcase v0.3.0 github.com/kelseyhightower/envconfig v1.4.0 github.com/liip/sheriff v0.11.1 - github.com/stoewer/go-strcase v1.3.0 github.com/stretchr/testify v1.8.4 golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 golang.org/x/sync v0.5.0 diff --git a/go.sum b/go.sum index bd7440482..4be215f18 100644 --- a/go.sum +++ b/go.sum @@ -445,6 +445,8 @@ github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -528,8 +530,6 @@ github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJC github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= -github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/internal/schemautil/mutations.go b/internal/schemautil/mutations.go index 0cdc5e80d..3cc31d9ad 100644 --- a/internal/schemautil/mutations.go +++ b/internal/schemautil/mutations.go @@ -113,29 +113,3 @@ func normalizeIPFilter(old, new map[string]interface{}) { new[fieldToWrite] = append(normalizedIPFilters, nonexistentIPFilters...) } - -// stringSuffixForIPFilters adds a _string suffix to the IP filters. -func stringSuffixForIPFilters(new map[string]interface{}) { - if new["ip_filter"] == nil { - return - } - - ipFilters := new["ip_filter"].([]interface{}) - - new["ip_filter_string"] = ipFilters - - new["ip_filter"] = nil -} - -// stringSuffixForNamespaces adds a _string suffix to the namespaces. 
-func stringSuffixForNamespaces(new map[string]interface{}) { - namespaces := new["namespaces"].([]interface{}) - - if namespaces == nil { - return - } - - new["namespace_string"] = namespaces - - new["namespaces"] = nil -} diff --git a/internal/schemautil/schemautil.go b/internal/schemautil/schemautil.go index 06b39a90f..2a1cec463 100644 --- a/internal/schemautil/schemautil.go +++ b/internal/schemautil/schemautil.go @@ -13,9 +13,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -// errInvalidStateType is an error that is returned when an invalid state type is encountered. -var errInvalidStateType = fmt.Errorf("invalid terraform state type") - // OptionalStringPointer retrieves a string pointer to a field, empty string // will be converted to nil func OptionalStringPointer(d *schema.ResourceData, key string) *string { @@ -317,25 +314,3 @@ func CopyServiceUserPropertiesFromAPIResponseToTerraform( return nil } - -// unmarshalUserConfig unmarshals the user config from the state to []map[string]interface{} format. -func unmarshalUserConfig(src interface{}) ([]map[string]interface{}, error) { - configList, ok := src.([]interface{}) - if !ok { - return nil, fmt.Errorf("%w: expected []interface{}", errInvalidStateType) - } - - // For some reason, it looks like this is never empty, even if the user config is not set. - // We will keep this check here just in case, but the actual check that breaks the code is - // the one where we check if the first element is nil. - if len(configList) == 0 || configList[0] == nil { - return nil, nil - } - - config, ok := configList[0].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("%w: expected map[string]interface{}", errInvalidStateType) - } - - return []map[string]interface{}{config}, nil -} diff --git a/internal/schemautil/service.go b/internal/schemautil/service.go index 785785011..856e0dbf5 100644 --- a/internal/schemautil/service.go +++ b/internal/schemautil/service.go @@ -13,8 +13,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/apiconvert" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/converters" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) // defaultTimeout is the default timeout for service operations. 
This is not a const because it can be changed during @@ -401,7 +401,7 @@ func resourceServiceCreate(ctx context.Context, d *schema.ResourceData, m interf return diag.Errorf("error getting project VPC ID: %s", err) } - cuc, err := apiconvert.ToAPI(userconfig.ServiceTypes, serviceType, d) + cuc, err := ExpandService(serviceType, d) if err != nil { return diag.Errorf( "error converting user config options for service type %s to API format: %s", serviceType, err, @@ -488,12 +488,11 @@ func ResourceServiceUpdate(ctx context.Context, d *schema.ResourceData, m interf return diag.Errorf("error getting project VPC ID: %s", err) } - st := d.Get("service_type").(string) - - cuc, err := apiconvert.ToAPI(userconfig.ServiceTypes, st, d) + serviceType := d.Get("service_type").(string) + cuc, err := ExpandService(serviceType, d) if err != nil { return diag.Errorf( - "error converting user config options for service type %s to API format: %s", st, err, + "error converting user config options for service type %s to API format: %s", serviceType, err, ) } @@ -657,37 +656,11 @@ func copyServicePropertiesFromAPIResponseToTerraform( } } - oldUserConfig, err := unmarshalUserConfig(d.Get(serviceType + "_user_config")) - if err != nil { - return err - } - - newUserConfig, err := apiconvert.FromAPI(userconfig.ServiceTypes, serviceType, s.UserConfig) + newUserConfig, err := FlattenService(serviceType, d, s.UserConfig) if err != nil { return err } - // Apply in-place user config mutations. - if len(oldUserConfig)*len(newUserConfig) != 0 { - oldUserConfigFirst := oldUserConfig[0] - - newUserConfigFirst := newUserConfig[0] - - // TODO: Remove when the remote schema in Aiven begins to contain information about sensitive fields. - copySensitiveFields(oldUserConfigFirst, newUserConfigFirst) - - // TODO: Remove when we no longer need to support the deprecated `ip_filter` field. 
- if _, exists := d.GetOk(serviceType + "_user_config.0.ip_filter_string"); exists { - stringSuffixForIPFilters(newUserConfigFirst) - } - - if _, exists := d.GetOk(serviceType + "_user_config.0.rules.0.mapping.0.namespaces_string"); exists { - stringSuffixForNamespaces(newUserConfigFirst) - } - - normalizeIPFilter(oldUserConfigFirst, newUserConfigFirst) - } - if err := d.Set(serviceType+"_user_config", newUserConfig); err != nil { return fmt.Errorf("cannot set `%s_user_config` : %s; Please make sure that all Aiven services have unique s names", serviceType, err) } @@ -857,3 +830,11 @@ func DatasourceServiceRead(ctx context.Context, d *schema.ResourceData, m interf return diag.Errorf("common %s/%s not found", projectName, serviceName) } + +func ExpandService(kind string, d *schema.ResourceData) (map[string]any, error) { + return converters.Expand(kind, service.GetUserConfig(kind), d) +} + +func FlattenService(kind string, d *schema.ResourceData, dto map[string]any) ([]map[string]any, error) { + return converters.Flatten(kind, service.GetUserConfig(kind), d, dto) +} diff --git a/internal/sdkprovider/service/cassandra/cassandra.go b/internal/sdkprovider/service/cassandra/cassandra.go index e9bfb72ed..a141c1e19 100644 --- a/internal/sdkprovider/service/cassandra/cassandra.go +++ b/internal/sdkprovider/service/cassandra/cassandra.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func cassandraSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func cassandraSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - s[schemautil.ServiceTypeCassandra+"_user_config"] = dist.ServiceTypeCassandra() + s[schemautil.ServiceTypeCassandra+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeCassandra) return s } diff --git a/internal/sdkprovider/service/clickhouse/clickhouse.go b/internal/sdkprovider/service/clickhouse/clickhouse.go index 51c58a636..b7ab4d006 100644 --- a/internal/sdkprovider/service/clickhouse/clickhouse.go +++ b/internal/sdkprovider/service/clickhouse/clickhouse.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func clickhouseSchema() map[string]*schema.Schema { @@ -18,7 +18,7 @@ func clickhouseSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - s[schemautil.ServiceTypeClickhouse+"_user_config"] = dist.ServiceTypeClickhouse() + s[schemautil.ServiceTypeClickhouse+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeClickhouse) s["service_integrations"] = &schema.Schema{ Type: schema.TypeList, Optional: true, diff --git a/internal/sdkprovider/service/flink/flink.go b/internal/sdkprovider/service/flink/flink.go index 9a14c5f56..3dedf73a0 100644 --- a/internal/sdkprovider/service/flink/flink.go +++ b/internal/sdkprovider/service/flink/flink.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - 
"github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenFlinkSchema() map[string]*schema.Schema { @@ -31,7 +31,7 @@ func aivenFlinkSchema() map[string]*schema.Schema { }, }, } - aivenFlinkSchema[schemautil.ServiceTypeFlink+"_user_config"] = dist.ServiceTypeFlink() + aivenFlinkSchema[schemautil.ServiceTypeFlink+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeFlink) return aivenFlinkSchema } diff --git a/internal/sdkprovider/service/grafana/grafana.go b/internal/sdkprovider/service/grafana/grafana.go index b4d63a5d8..befe69c77 100644 --- a/internal/sdkprovider/service/grafana/grafana.go +++ b/internal/sdkprovider/service/grafana/grafana.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func grafanaSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func grafanaSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - s[schemautil.ServiceTypeGrafana+"_user_config"] = dist.ServiceTypeGrafana() + s[schemautil.ServiceTypeGrafana+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeGrafana) return s } diff --git a/internal/sdkprovider/service/grafana/grafana_test.go b/internal/sdkprovider/service/grafana/grafana_test.go index caccea153..aafd6692b 100644 --- a/internal/sdkprovider/service/grafana/grafana_test.go +++ b/internal/sdkprovider/service/grafana/grafana_test.go @@ -499,3 +499,65 @@ resource "aiven_grafana" "grafana" { `, prefix, project, ipFilterObjs) } + +// TestAccAiven_grafana_set_change tests that changing a set actually changes it count +// This is a test for diff suppressor doesn't suppress set's items. 
+func TestAccAiven_grafana_set_change(t *testing.T) { + resourceName := "aiven_grafana.bar" + rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + ProtoV6ProviderFactories: acc.TestProtoV6ProviderFactories, + CheckDestroy: acc.TestAccCheckAivenServiceResourceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGrafanaResourceSetChange(rName, "100, 101, 111"), + Check: resource.ComposeTestCheckFunc( + acc.TestAccCheckAivenServiceCommonAttributes("data.aiven_grafana.common"), + resource.TestCheckResourceAttr(resourceName, "grafana_user_config.0.auth_github.0.client_id", "my_client_id"), + resource.TestCheckResourceAttr(resourceName, "grafana_user_config.0.auth_github.0.client_secret", "my_client_secret"), + resource.TestCheckResourceAttr(resourceName, "grafana_user_config.0.auth_github.0.team_ids.#", "3"), + ), + }, + { + Config: testAccGrafanaResourceSetChange(rName, "111"), + Check: resource.ComposeTestCheckFunc( + acc.TestAccCheckAivenServiceCommonAttributes("data.aiven_grafana.common"), + resource.TestCheckResourceAttr(resourceName, "grafana_user_config.0.auth_github.0.team_ids.#", "1"), + ), + }, + }, + }) +} + +func testAccGrafanaResourceSetChange(name, teamIDs string) string { + return fmt.Sprintf(` +data "aiven_project" "foo" { + project = "%s" +} + +resource "aiven_grafana" "bar" { + project = data.aiven_project.foo.project + cloud_name = "google-europe-west1" + plan = "startup-1" + service_name = "test-acc-sr-%s" + maintenance_window_dow = "monday" + maintenance_window_time = "10:00:00" + + grafana_user_config { + auth_github { + client_id = "my_client_id" + client_secret = "my_client_secret" + team_ids = [%s] + } + } +} + +data "aiven_grafana" "common" { + service_name = aiven_grafana.bar.service_name + project = data.aiven_project.foo.project + + depends_on = [aiven_grafana.bar] +}`, os.Getenv("AIVEN_PROJECT_NAME"), name, teamIDs) +} diff --git a/internal/sdkprovider/service/influxdb/influxdb.go b/internal/sdkprovider/service/influxdb/influxdb.go index 070153592..6e4a7dc46 100644 --- a/internal/sdkprovider/service/influxdb/influxdb.go +++ b/internal/sdkprovider/service/influxdb/influxdb.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func influxDBSchema() map[string]*schema.Schema { @@ -25,7 +25,7 @@ func influxDBSchema() map[string]*schema.Schema { }, }, } - s[schemautil.ServiceTypeInfluxDB+"_user_config"] = dist.ServiceTypeInfluxdb() + s[schemautil.ServiceTypeInfluxDB+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeInfluxDB) return s } diff --git a/internal/sdkprovider/service/kafka/kafka.go b/internal/sdkprovider/service/kafka/kafka.go index 094efcb0d..3af10f1b5 100644 --- a/internal/sdkprovider/service/kafka/kafka.go +++ b/internal/sdkprovider/service/kafka/kafka.go @@ -10,8 +10,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + 
"github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenKafkaSchema() map[string]*schema.Schema { @@ -70,7 +70,7 @@ func aivenKafkaSchema() map[string]*schema.Schema { }, }, } - aivenKafkaSchema[schemautil.ServiceTypeKafka+"_user_config"] = dist.ServiceTypeKafka() + aivenKafkaSchema[schemautil.ServiceTypeKafka+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeKafka) return aivenKafkaSchema } diff --git a/internal/sdkprovider/service/kafka/kafka_connect.go b/internal/sdkprovider/service/kafka/kafka_connect.go index 7b7b8af9c..90ed59498 100644 --- a/internal/sdkprovider/service/kafka/kafka_connect.go +++ b/internal/sdkprovider/service/kafka/kafka_connect.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenKafkaConnectSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenKafkaConnectSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - kafkaConnectSchema[schemautil.ServiceTypeKafkaConnect+"_user_config"] = dist.ServiceTypeKafkaConnect() + kafkaConnectSchema[schemautil.ServiceTypeKafkaConnect+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeKafkaConnect) return kafkaConnectSchema } diff --git a/internal/sdkprovider/service/kafka/kafka_mirrormaker.go b/internal/sdkprovider/service/kafka/kafka_mirrormaker.go index d291c71b6..589241239 100644 --- a/internal/sdkprovider/service/kafka/kafka_mirrormaker.go +++ b/internal/sdkprovider/service/kafka/kafka_mirrormaker.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenKafkaMirrormakerSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenKafkaMirrormakerSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - kafkaMMSchema[schemautil.ServiceTypeKafkaMirrormaker+"_user_config"] = dist.ServiceTypeKafkaMirrormaker() + kafkaMMSchema[schemautil.ServiceTypeKafkaMirrormaker+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeKafkaMirrormaker) return kafkaMMSchema } diff --git a/internal/sdkprovider/service/kafka/kafka_test.go b/internal/sdkprovider/service/kafka/kafka_test.go index 4e85d7810..68699dc20 100644 --- a/internal/sdkprovider/service/kafka/kafka_test.go +++ b/internal/sdkprovider/service/kafka/kafka_test.go @@ -343,7 +343,7 @@ func testAccCheckAivenServiceKafkaAttributes(n string) resource.TestCheckFunc { } } -func testAccKafkaResourceUserConfigKafkaNullFieldsOnly(project, prefix string) string { +func testAccKafkaResourceUserConfigKafkaOmitsNullFields(project, prefix string) string { return fmt.Sprintf(` resource "aiven_kafka" "kafka" { project = "%s" @@ -366,7 +366,7 @@ resource "aiven_kafka" "kafka" { `, project, prefix) } -func TestAccAiven_kafka_userconfig_kafka_null_fields_only(t *testing.T) { +func TestAccAiven_kafka_user_config_kafka_omits_null_fields(t *testing.T) { 
project := os.Getenv("AIVEN_PROJECT_NAME") prefix := "test-tf-acc-" + acctest.RandString(7) resourceName := "aiven_kafka.kafka" @@ -376,13 +376,11 @@ func TestAccAiven_kafka_userconfig_kafka_null_fields_only(t *testing.T) { CheckDestroy: acc.TestAccCheckAivenServiceResourceDestroy, Steps: []resource.TestStep{ { - Config: testAccKafkaResourceUserConfigKafkaNullFieldsOnly(project, prefix), + Config: testAccKafkaResourceUserConfigKafkaOmitsNullFields(project, prefix), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"), resource.TestCheckResourceAttr(resourceName, "kafka_user_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "kafka_user_config.0.kafka.#", "1"), - resource.TestCheckResourceAttr(resourceName, "kafka_user_config.0.kafka.0.group_max_session_timeout_ms", "0"), - resource.TestCheckResourceAttr(resourceName, "kafka_user_config.0.kafka.0.log_retention_bytes", "0"), + resource.TestCheckResourceAttr(resourceName, "kafka_user_config.0.kafka.#", "0"), ), }, }, diff --git a/internal/sdkprovider/service/m3db/m3aggregator.go b/internal/sdkprovider/service/m3db/m3aggregator.go index a5c2cabd0..6e0593a34 100644 --- a/internal/sdkprovider/service/m3db/m3aggregator.go +++ b/internal/sdkprovider/service/m3db/m3aggregator.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenM3AggregatorSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenM3AggregatorSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - schemaM3[schemautil.ServiceTypeM3Aggregator+"_user_config"] = dist.ServiceTypeM3aggregator() + schemaM3[schemautil.ServiceTypeM3Aggregator+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeM3Aggregator) return schemaM3 } diff --git a/internal/sdkprovider/service/m3db/m3db.go b/internal/sdkprovider/service/m3db/m3db.go index ce3197097..e4ae79567 100644 --- a/internal/sdkprovider/service/m3db/m3db.go +++ b/internal/sdkprovider/service/m3db/m3db.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenM3DBSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenM3DBSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - schemaM3[schemautil.ServiceTypeM3+"_user_config"] = dist.ServiceTypeM3db() + schemaM3[schemautil.ServiceTypeM3+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeM3) return schemaM3 } diff --git a/internal/sdkprovider/service/mysql/mysql.go b/internal/sdkprovider/service/mysql/mysql.go index 499160330..11f3a0c7d 100644 --- a/internal/sdkprovider/service/mysql/mysql.go +++ b/internal/sdkprovider/service/mysql/mysql.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - 
"github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenMySQLSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenMySQLSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - schemaMySQL[schemautil.ServiceTypeMySQL+"_user_config"] = dist.ServiceTypeMysql() + schemaMySQL[schemautil.ServiceTypeMySQL+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeMySQL) return schemaMySQL } diff --git a/internal/sdkprovider/service/opensearch/opensearch.go b/internal/sdkprovider/service/opensearch/opensearch.go index 6076d8753..2478b8538 100644 --- a/internal/sdkprovider/service/opensearch/opensearch.go +++ b/internal/sdkprovider/service/opensearch/opensearch.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func opensearchSchema() map[string]*schema.Schema { @@ -26,7 +26,7 @@ func opensearchSchema() map[string]*schema.Schema { }, }, } - s[schemautil.ServiceTypeOpenSearch+"_user_config"] = dist.ServiceTypeOpensearch() + s[schemautil.ServiceTypeOpenSearch+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeOpenSearch) return s } diff --git a/internal/sdkprovider/service/pg/pg.go b/internal/sdkprovider/service/pg/pg.go index dad48acc5..c679335b1 100644 --- a/internal/sdkprovider/service/pg/pg.go +++ b/internal/sdkprovider/service/pg/pg.go @@ -12,10 +12,8 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/apiconvert" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenPGSchema() map[string]*schema.Schema { @@ -80,7 +78,7 @@ func aivenPGSchema() map[string]*schema.Schema { }, }, } - schemaPG[schemautil.ServiceTypePG+"_user_config"] = dist.ServiceTypePg() + schemaPG[schemautil.ServiceTypePG+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypePG) return schemaPG } @@ -135,11 +133,10 @@ func resourceServicePGUpdate(ctx context.Context, d *schema.ResourceData, m inte return diag.FromErr(err) } - userConfig, err := apiconvert.ToAPI(userconfig.ServiceTypes, "pg", d) + userConfig, err := schemautil.ExpandService(schemautil.ServiceTypePG, d) if err != nil { return diag.FromErr(err) } - if userConfig["pg_version"] != nil { s, err := client.Services.Get(ctx, projectName, serviceName) if err != nil { diff --git a/internal/sdkprovider/service/redis/redis.go b/internal/sdkprovider/service/redis/redis.go index 3d6b8f71c..985de4efd 100644 --- a/internal/sdkprovider/service/redis/redis.go +++ b/internal/sdkprovider/service/redis/redis.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func redisSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func redisSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - s[schemautil.ServiceTypeRedis+"_user_config"] = dist.ServiceTypeRedis() + s[schemautil.ServiceTypeRedis+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeRedis) return s } diff --git a/internal/sdkprovider/userconfig/converters/converters.go b/internal/sdkprovider/userconfig/converters/converters.go new file mode 100644 index 000000000..5c16ab93a --- /dev/null +++ b/internal/sdkprovider/userconfig/converters/converters.go @@ -0,0 +1,430 @@ +package converters + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const userConfigSuffix = "_user_config" + +// Expand expands schema.ResourceData into a DTO map +func Expand(kind string, s *schema.Schema, d *schema.ResourceData) (map[string]any, error) { + key := kind + userConfigSuffix + state := &stateCompose{ + key: key, + path: key + ".0", // starts from root user config + schema: s, + resource: d, + } + + // When "configs" is empty, then we need to delete all arrays in it. + // That's why it doesn't exit here. + configs := d.GetRawConfig().GetAttr(key).AsValueSlice() + if len(configs) > 0 { + state.config = configs[0] + } + + dto, err := expandObj(state) + if err != nil { + return nil, err + } + + renameAliases(dto) + return dto, nil +} + +// stateCompose combines "raw state" and schema.ResourceData +// With the state it is possible to say "if value is null", hence is defined by user. +// With schema.ResourceData you get the value. +type stateCompose struct { + key string // state attribute name or schema.ResourceData key + path string // schema.ResourceData path, i.e. foo.0.bar.0.baz + schema *schema.Schema + config cty.Value + resource *schema.ResourceData +} + +// setItems returns schema.Set values that has state. +func (s *stateCompose) setItems() []any { + result := make([]any, 0) + if s.config.IsNull() { + // Makes possible to send ip_filter=[] to clear the remote list. 
+		return result
+	}
+
+	// Builds a hash map of the config elements
+	hashes := make(map[string]bool, s.config.LengthInt())
+	for _, item := range s.config.AsValueSlice() {
+		if item.Type() == cty.String {
+			hashes[item.AsString()] = true
+		} else {
+			hashes[item.AsBigFloat().String()] = true
+		}
+	}
+
+	// Picks only the values that are present in the state
+	for _, v := range s.get().(*schema.Set).List() {
+		if hashes[fmt.Sprintf("%v", v)] {
+			result = append(result, v)
+		}
+	}
+	return result
+}
+
+// listItems returns the state of each object in the list
+func (s *stateCompose) listItems() (result []*stateCompose) {
+	if s.config.IsNull() {
+		return result
+	}
+	for i, v := range s.config.AsValueSlice() {
+		c := &stateCompose{
+			key:      s.key,
+			path:     fmt.Sprintf("%s.%d", s.path, i),
+			schema:   s.schema,
+			config:   v,
+			resource: s.resource,
+		}
+		result = append(result, c)
+	}
+	return result
+}
+
+// objectProperties returns the states of the object's properties
+func (s *stateCompose) objectProperties() map[string]*stateCompose {
+	props := make(map[string]*stateCompose)
+	res := s.schema.Elem.(*schema.Resource)
+	for key, subSchema := range res.Schema {
+		if subSchema.ForceNew && !s.resource.IsNewResource() {
+			continue
+		}
+
+		var config cty.Value
+		if !s.config.IsNull() {
+			// Can't get a value from nil
+			config = s.config.GetAttr(key)
+		}
+
+		p := &stateCompose{
+			key:      key,
+			path:     fmt.Sprintf("%s.%s", s.path, key),
+			resource: s.resource,
+			config:   config,
+			schema:   subSchema,
+		}
+
+		props[key] = p
+	}
+	return props
+}
+func (s *stateCompose) get() any {
+	return s.resource.Get(s.path)
+}
+
+func (s *stateCompose) isNull() bool {
+	return s.config.IsNull()
+}
+
+func (s *stateCompose) hasChange() bool {
+	return s.resource.HasChange(s.path)
+}
+
+func expandObj(state *stateCompose) (map[string]any, error) {
+	m := make(map[string]any)
+	for k, v := range state.objectProperties() {
+		value, err := expandAttr(v)
+		if err != nil {
+			return nil, fmt.Errorf("%q field conversion error: %w", k, err)
+		}
+		if value != nil {
+			m[k] = value
+		}
+	}
+	return m, nil
+}
+
+// expandAttr returns a Go value for the given attribute state
+func expandAttr(state *stateCompose) (any, error) {
+	switch state.schema.Type {
+	case schema.TypeString, schema.TypeBool, schema.TypeInt, schema.TypeFloat:
+		if state.isNull() {
+			// Null scalar, no value in the config
+			return nil, nil
+		}
+		return state.get(), nil
+	}
+
+	if state.schema.Type == schema.TypeSet {
+		if state.isNull() && !state.hasChange() {
+			// A value that hasn't been set by the user yet,
+			// but has been received from the API.
+			return nil, nil
+		}
+
+		return state.setItems(), nil
+	}
+
+	// schema.TypeList
+	states := state.listItems()
+	items := make([]any, 0, len(states))
+	for i := range states {
+		exp, err := expandObj(states[i])
+		if err != nil {
+			return nil, err
+		}
+		// If an object is not empty
+		if exp != nil {
+			items = append(items, exp)
+		}
+	}
+
+	// This is a list of objects
+	if state.schema.MaxItems != 1 {
+		return items, nil
+	}
+
+	// If schema.TypeList && MaxItems == 1, then it is an object
+	switch len(items) {
+	case 1:
+		// A plain object (in TF a list with one object is an object)
+		return items[0], nil
+	case 0:
+		// The object has no state or has been removed.
+		// We can't remove objects from the state, so send a nil.
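+		// For instance (hypothetical attribute): removing a nested `pg {...}`
+		// block from the file yields zero items here; nil omits the "pg" key
+		// from the DTO entirely instead of sending an explicit empty object.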
+		return nil, nil
+	default:
+		// If MaxItems == 1, then this shouldn't ever happen
+		return nil, fmt.Errorf("unexpected list length %d for key %s", len(items), state.key)
+	}
+}
+
+func renameAliases(dto map[string]any) {
+	keys := []struct {
+		path    string
+		name    string
+		aliases []string
+	}{
+		{
+			path:    "", // root
+			name:    "ip_filter",
+			aliases: []string{"ip_filter_string", "ip_filter_object"},
+		},
+		{
+			path:    "rules.0.mapping",
+			name:    "namespaces",
+			aliases: []string{"namespaces_string", "namespaces_object"},
+		},
+	}
+
+	for _, key := range keys {
+		var branches []map[string]any
+		if key.path == "" {
+			branches = append(branches, dto)
+		} else {
+			v, ok := drillKey(dto, key.path)
+			if !ok {
+				// The branch does not exist, nothing to do
+				continue
+			}
+
+			// It can be a list of maps, or just one map
+			switch v.(type) {
+			case []any:
+				branches = asMapList(v)
+			default:
+				branches = append(branches, v.(map[string]any))
+			}
+		}
+
+		for _, branch := range branches {
+			for _, alias := range key.aliases {
+				// Copies only non-zero values.
+				// For instance: foo=[], foo_string=[val], foo_object=[]
+				// "foo_object" shouldn't override "foo_string"
+				if v, ok := branch[alias]; ok {
+					// It is valid to send an empty list,
+					// so we must choose a non-empty alias.
+					if a, ok := v.([]any); ok && len(a) > 0 {
+						branch[key.name] = v
+					}
+					delete(branch, alias)
+				}
+			}
+		}
+	}
+}
+
+// Flatten flattens a DTO into a terraform-compatible object
+func Flatten(kind string, s *schema.Schema, d *schema.ResourceData, dto map[string]any) ([]map[string]any, error) {
+	withPrefix := func(v string) string {
+		return fmt.Sprintf("%s%s.0.%s", kind, userConfigSuffix, v)
+	}
+
+	// Renames the ip_filter field
+	if _, ok := dto["ip_filter"]; ok {
+		assignAlias(d, withPrefix("ip_filter"), dto, "ip_filter", "network")
+	}
+
+	// Renames the namespaces field
+	if mapping, ok := drillKey(dto, "rules.0.mapping"); ok {
+		assignAlias(d, withPrefix("rules.0.mapping.0.namespaces"), mapping.(map[string]any), "namespaces", "name")
+	}
+
+	// Copies "create only" fields from the original config,
+	// like admin_password, which is received only in the create (POST) response.
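+	// Illustrative flow: the create response contains admin_password; later
+	// GETs don't. Copying the value kept in state back into the DTO here
+	// prevents a perpetual diff on that field.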
+	for _, k := range createOnlyFields() {
+		v, ok := d.GetOk(withPrefix(k))
+		if ok {
+			dto[k] = v
+		}
+	}
+
+	r := s.Elem.(*schema.Resource)
+	tfo, err := flattenObj(r.Schema, dto)
+	if tfo == nil || err != nil {
+		return nil, err
+	}
+	return []map[string]any{tfo}, nil
+}
+
+func flattenObj(s map[string]*schema.Schema, dto map[string]any) (map[string]any, error) {
+	tfo := make(map[string]any)
+	for k, v := range s {
+		vv, ok := dto[k]
+		if !ok {
+			continue
+		}
+
+		if vv == nil {
+			continue
+		}
+
+		value, err := flattenAttr(v, vv)
+		if err != nil {
+			return nil, fmt.Errorf("%q field conversion error: %w", k, err)
+		}
+
+		if value != nil {
+			tfo[k] = value
+		}
+	}
+	if len(tfo) == 0 {
+		return nil, nil
+	}
+	return tfo, nil
+}
+
+func flattenAttr(s *schema.Schema, data any) (any, error) {
+	switch s.Type {
+	case schema.TypeString:
+		return castType[string](data)
+	case schema.TypeBool:
+		return castType[bool](data)
+	case schema.TypeInt:
+		i, err := data.(json.Number).Int64()
+		return int(i), err
+	case schema.TypeFloat:
+		return data.(json.Number).Float64()
+	}
+
+	scalarSchema, isScalar := s.Elem.(*schema.Schema)
+	if isScalar {
+		values := make([]any, 0)
+		for _, v := range data.([]any) {
+			val, err := flattenAttr(scalarSchema, v)
+			if err != nil {
+				return nil, err
+			}
+			values = append(values, val)
+		}
+		return schema.NewSet(schema.HashSchema(scalarSchema), values), nil
+	}
+
+	// Single object or list of objects
+	r := s.Elem.(*schema.Resource)
+	if s.Type == schema.TypeList {
+		var list []any
+		if o, isObject := data.(map[string]any); isObject {
+			// Single object
+			if len(o) != 0 {
+				list = append(list, o)
+			}
+		} else {
+			// List of objects
+			list = data.([]any)
+		}
+
+		return flattenList(r.Schema, list)
+	}
+
+	// Set of objects
+	items, err := flattenList(r.Schema, data.([]any))
+	if items == nil || err != nil {
+		return nil, err
+	}
+
+	return schema.NewSet(schema.HashResource(r), items), nil
+}
+
+func flattenList(s map[string]*schema.Schema, list []any) ([]any, error) {
+	if len(list) == 0 {
+		return nil, nil
+	}
+
+	items := make([]any, 0, len(list))
+	for _, item := range list {
+		v, err := flattenObj(s, item.(map[string]any))
+		if err != nil {
+			return nil, err
+		}
+		if v != nil {
+			items = append(items, v)
+		}
+	}
+	return items, nil
+}
+
+// assignAlias renames keys for multi-typed properties, i.e. ip_filter -> [ip_filter_string, ip_filter_object]
+func assignAlias(d *schema.ResourceData, path string, dto map[string]any, key, sortBy string) {
+	values, ok := dto[key].([]any)
+	if !ok || len(values) == 0 {
+		return
+	}
+
+	var suffix string
+	const (
+		str = "_string"
+		obj = "_object"
+	)
+
+	// If the DTO has objects, then it is foo_object
+	if _, ok := values[0].(map[string]any); ok {
+		suffix = obj
+
+		// State objects have a specific order.
+		// The DTO objects must be sorted to match it, otherwise the diff shows changes.
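+		// Example (illustrative): state has [{network: "a"}, {network: "b"}]
+		// but the API returns [{network: "b"}, {network: "a"}]; sortByKey
+		// below restores the state's order so no false diff is produced.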
+		if inStateObjs, ok := d.GetOk(path + obj); ok {
+			dto[key] = sortByKey(sortBy, inStateObjs, dto[key])
+		}
+	}
+
+	// If the state has foo_string, the user has switched to the new key
+	if _, ok := d.GetOk(path + str); ok {
+		suffix = str
+	}
+
+	if suffix != "" {
+		dto[key+suffix] = dto[key]
+		delete(dto, key)
+	}
+}
+
+// createOnlyFields returns the fields that are received in the create (POST) response only
+func createOnlyFields() []string {
+	return []string{
+		"admin_username",
+		"admin_password",
+	}
+}
diff --git a/internal/sdkprovider/userconfig/converters/utils.go b/internal/sdkprovider/userconfig/converters/utils.go
new file mode 100644
index 000000000..2fb6822b3
--- /dev/null
+++ b/internal/sdkprovider/userconfig/converters/utils.go
@@ -0,0 +1,102 @@
+package converters
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// sortByKey sorts the DTO objects to match the order of the original objects, comparing them by the given key.
+// For instance, when an ip_filter_object list is sent, it is sorted on the backend.
+// That makes a diff, because the user-defined order is violated.
+func sortByKey(sortBy string, originalSrc, dtoSrc any) any {
+	original := asMapList(originalSrc)
+	dto := asMapList(dtoSrc)
+	if len(original) != len(dto) {
+		return dtoSrc
+	}
+
+	sortMap := make(map[string]int)
+	for i, v := range original {
+		sortMap[v[sortBy].(string)] = i
+	}
+
+	sort.Slice(dto, func(i, j int) bool {
+		ii := dto[i][sortBy].(string)
+		jj := dto[j][sortBy].(string)
+		// Ascending by the original index restores the user-defined order
+		return sortMap[ii] < sortMap[jj]
+	})
+
+	// Needs to be cast to "any",
+	// otherwise it might blow up in the flattenObj function
+	// with a type mismatch (map[string]any vs any)
+	result := make([]any, 0, len(dto))
+	for _, v := range dto {
+		result = append(result, v)
+	}
+	return result
+}
+
+// drillKey returns the value at the given dot-separated path,
+// descending into nested maps and taking the first element of lists
+func drillKey(dto map[string]any, path string) (any, bool) {
+	if dto == nil {
+		return nil, false
+	}
+
+	keys := strings.Split(path, ".0.")
+	keysLen := len(keys) - 1
+	for i := 0; ; i++ {
+		v, ok := dto[keys[i]]
+		if !ok {
+			return nil, false
+		}
+
+		isLast := i == keysLen
+		if isLast {
+			return v, true
+		}
+
+		next, ok := v.(map[string]any)
+		if ok {
+			dto = next
+			continue
+		}
+
+		// Gets the first element of an array
+		list, ok := v.([]any)
+		if !ok || len(list) == 0 {
+			return nil, false
+		}
+
+		next, ok = list[0].(map[string]any)
+		if !ok {
+			return nil, false
+		}
+		dto = next
+	}
+}
+
+// asList converts "any" to a list of the given type
+func asList[T any](v any) []T {
+	list := v.([]any)
+	result := make([]T, 0, len(list))
+	for _, item := range list {
+		result = append(result, item.(T))
+	}
+	return result
+}
+
+// asMapList converts "any" to a list of objects
+func asMapList(v any) []map[string]any {
+	return asList[map[string]any](v)
+}
+
+// castType returns an error on invalid type
+func castType[T any](v any) (T, error) {
+	t, ok := v.(T)
+	if !ok {
+		var empty T
+		return empty, fmt.Errorf("invalid type. Expected %T, got %T", empty, v)
+	}
+	return t, nil
+}
diff --git a/internal/sdkprovider/userconfig/converters/utils_test.go b/internal/sdkprovider/userconfig/converters/utils_test.go
new file mode 100644
index 000000000..68c28e423
--- /dev/null
+++ b/internal/sdkprovider/userconfig/converters/utils_test.go
@@ -0,0 +1,150 @@
+package converters
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestDrillKey(t *testing.T) {
+	js := `{
+		"rules": {
+			"mapping": [
+				{
+					"namespaces": ["original"],
+					"namespaces_string": ["string"],
+					"namespaces_object": [
+						{
+							"retention": "40h"
+						}
+					]
+				}
+			]
+		}
+	}`
+
+	var m map[string]any
+	err := json.Unmarshal([]byte(js), &m)
+	require.NoError(t, err)
+
+	cases := []struct {
+		key         string
+		expectOK    bool
+		expectValue any
+	}{
+		{
+			key:         "rules.0.mapping.0.namespaces",
+			expectOK:    true,
+			expectValue: []any{"original"},
+		},
+		{
+			key:         "rules.0.mapping.0.namespaces_string",
+			expectOK:    true,
+			expectValue: []any{"string"},
+		},
+		{
+			key:         "rules.0.mapping.0.namespaces_object",
+			expectOK:    true,
+			expectValue: []any{map[string]any{"retention": "40h"}},
+		},
+		{
+			key:         "rules.0.unknown",
+			expectOK:    false,
+			expectValue: nil,
+		},
+		{
+			key:         "unknown",
+			expectOK:    false,
+			expectValue: nil,
+		},
+	}
+
+	for i, opt := range cases {
+		t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
+			v, ok := drillKey(m, opt.key)
+			assert.Equal(t, opt.expectOK, ok)
+			assert.Empty(t, cmp.Diff(opt.expectValue, v))
+		})
+	}
+}
+
+func TestRenameAliases(t *testing.T) {
+	cases := []struct {
+		name     string
+		src      string
+		expected string
+	}{
+		{
+			name:     "keeps original key",
+			src:      `{"ip_filter": ["0.0.0.0/0"]}`,
+			expected: `{"ip_filter": ["0.0.0.0/0"]}`,
+		},
+		{
+			name:     "chooses original out of 3",
+			src:      `{"ip_filter": ["0.0.0.0/0"], "ip_filter_string": [], "ip_filter_object": []}`,
+			expected: `{"ip_filter": ["0.0.0.0/0"]}`,
+		},
+		{
+			name:     "chooses string out of 3",
+			src:      `{"ip_filter": [], "ip_filter_string": ["0.0.0.0/0"], "ip_filter_object": []}`,
+			expected: `{"ip_filter": ["0.0.0.0/0"]}`,
+		},
+		{
+			name:     "ignores unknown key",
+			src:      `{"whatever": ["0.0.0.0/0"]}`,
+			expected: `{"whatever": ["0.0.0.0/0"]}`,
+		},
+		{
+			name:     `renames "_string" suffix`,
+			src:      `{"ip_filter_string": ["0.0.0.0/0"]}`,
+			expected: `{"ip_filter": ["0.0.0.0/0"]}`,
+		},
+		{
+			name:     "renames _object suffix",
+			src:      `{"ip_filter_object": [{"name": "foo"}]}`,
+			expected: `{"ip_filter": [{"name": "foo"}]}`,
+		},
+		{
+			name:     "ignores namespaces_string on the root level",
+			src:      `{"namespaces_string": {"name": "foo"}}`,
+			expected: `{"namespaces_string": {"name": "foo"}}`,
+		},
+		{
+			name: "renames namespaces_string where expected",
+			src: `{
+				"rules": {"mapping": [{"namespaces_string": ["string"]}]}
+			}`,
+			expected: `{
+				"rules": {"mapping": [{"namespaces": ["string"]}]}
+			}`,
+		},
+		{
+			name: "renames namespaces_object where expected",
+			src: `{
+				"rules": {"mapping": [{"namespaces_object": [{"name": "foo"}]}]}
+			}`,
+			expected: `{
+				"rules": {"mapping": [{"namespaces": [{"name": "foo"}]}]}
+			}`,
+		},
+	}
+
+	reSpaces := regexp.MustCompile(`\s+`)
+	for _, opt := range cases {
+		t.Run(opt.name, func(t *testing.T) {
+			var m map[string]any
+			err := json.Unmarshal([]byte(opt.src), &m)
+			require.NoError(t, err)
+
+			renameAliases(m)
+			b, err := json.Marshal(&m)
+			require.NoError(t, err)
+			assert.Empty(t, cmp.Diff(reSpaces.ReplaceAllString(opt.expected, ""), string(b)))
+		})
+	}
+}
diff --git a/internal/sdkprovider/userconfig/diff/diff.go b/internal/sdkprovider/userconfig/diff/diff.go
new file mode 100644
index 000000000..a3d8f1b91
--- /dev/null
+++ b/internal/sdkprovider/userconfig/diff/diff.go
@@ -0,0 +1,66 @@
+package diff
+
+import (
+	"regexp"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+// reIsSetElement matches set items, whose state keys end with a 9-digit hash.
+var reIsSetElement = regexp.MustCompile(`\.[0-9]{9}$`)
+
+// SuppressUnchanged suppresses the diff for unchanged fields.
+// Applied to all nested values: both objects and arrays.
+func SuppressUnchanged(k, old, new string, d *schema.ResourceData) bool {
+	// Lists, sets and objects (an object is a list with one item).
+	if k[len(k)-1:] == "#" {
+		if d.HasChange(k) {
+			// For some reason terraform might mark objects as "changed".
+			// In that case, terraform returns a list with a nil value.
+			// "nil" means that the object hasn't changed.
+			key := strings.TrimSuffix(k, ".#")
+			v, ok := d.Get(key).([]any)
+			return ok && len(v) == 1 && v[0] == nil
+		}
+
+		// Suppress empty objects and empty arrays
+		return true
+	}
+
+	// IP filter items are handled with a special suppressor.
+	if strings.Contains(k, ".ip_filter.") || strings.Contains(k, ".ip_filter_string.") {
+		return suppressIPFilterSet(k, old, new, d)
+	}
+
+	// Doesn't suppress "set" items.
+	if reIsSetElement.MatchString(k) {
+		return false
+	}
+
+	// Object properties.
+	// "old" is the value read from the API,
+	// "new" is the value read from the tf file.
+	// If a value is "computed" (received as a default), it has a non-empty "old" (any value) and an empty "new" (zero value).
+	// For instance, when you create kafka it gets "kafka_version = 3.5",
+	// and since that's not in your tf file, terraform shows a diff.
+	// This switch suppresses that, as well as other "default" values.
+	switch new {
+	case "", "0", "false":
+		// "":      kafka_version = "3.5" -> ""
+		// "0":     backup_hour = "4" -> 0
+		// "false": allow_sign_up = true -> false
+		return !d.HasChange(k)
+	}
+	return false
+}
+
+// suppressIPFilterSet handles the ip_filter list's specific logic, such as its default value
+func suppressIPFilterSet(k, old, new string, d *schema.ResourceData) bool {
+	// Suppresses ip_filter = [0.0.0.0/0]
+	path := strings.Split(k, ".")
+	// Turns ~ip_filter.1234 into ~ip_filter.#
+	v, ok := d.GetOk(strings.Join(path[:len(path)-1], ".") + ".#")
+	// Literally, if the value is "0.0.0.0/0" and the parent's length is "1"
+	return old == "0.0.0.0/0" && new == "" && ok && v.(int) == 1
+}
diff --git a/internal/sdkprovider/userconfig/service/cassandra.go b/internal/sdkprovider/userconfig/service/cassandra.go
new file mode 100644
index 000000000..ddfd4cbff
--- /dev/null
+++ b/internal/sdkprovider/userconfig/service/cassandra.go
@@ -0,0 +1,160 @@
+// Code generated by user config generator. DO NOT EDIT.
+ +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func cassandraUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Cassandra user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Deprecated: "This property is deprecated.", + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "backup_hour": { + Description: "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "backup_minute": { + Description: "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "cassandra": { + Description: "cassandra configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "batch_size_fail_threshold_in_kb": { + Description: "Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.", + Optional: true, + Type: schema.TypeInt, + }, + "batch_size_warn_threshold_in_kb": { + Description: "Log a warning message on any multiple-partition batch size exceeding this value.5kb per batch by default.Caution should be taken on increasing the size of this thresholdas it can lead to node instability.", + Optional: true, + Type: schema.TypeInt, + }, + "datacenter": { + Description: "Name of the datacenter to which nodes of this service belong. Can be set only when creating the service.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "cassandra_version": { + Description: "Cassandra major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"4", "3"}, false), + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "migrate_sstableloader": { + Description: "Sets the service into migration mode enabling the sstableloader utility to be used to upload Cassandra data files. Available only on service create.", + Optional: true, + Type: schema.TypeBool, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "service_to_join_with": { + Description: "When bootstrapping, instead of creating a new Cassandra cluster try to join an existing one from another service. Can only be set on service creation.", + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/clickhouse.go b/internal/sdkprovider/userconfig/service/clickhouse.go new file mode 100644 index 000000000..cdfb64870 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/clickhouse.go @@ -0,0 +1,171 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func clickhouseUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Clickhouse user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "clickhouse": { + Description: "Allow clients to connect to clickhouse with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_https": { + Description: "Allow clients to connect to clickhouse_https with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_mysql": { + Description: "Allow clients to connect to clickhouse_mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "clickhouse": { + Description: "Enable clickhouse.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_https": { + Description: "Enable clickhouse_https.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_mysql": { + Description: "Enable clickhouse_mysql.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "clickhouse": { + Description: "Allow clients to connect to clickhouse from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_https": { + Description: "Allow clients to connect to clickhouse_https from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_mysql": { + Description: "Allow clients to connect to clickhouse_mysql from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/flink.go b/internal/sdkprovider/userconfig/service/flink.go new file mode 100644 index 000000000..8eb46ad14 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/flink.go @@ -0,0 +1,105 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func flinkUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Flink user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "flink_version": { + Description: "Flink major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.16"}, false), + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "number_of_task_slots": { + Description: "Task slots per node. For a 3 node plan, total number of task slots is 3x this value.", + Optional: true, + Type: schema.TypeInt, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "flink": { + Description: "Enable flink.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/grafana.go b/internal/sdkprovider/userconfig/service/grafana.go new file mode 100644 index 000000000..588cc6a24 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/grafana.go @@ -0,0 +1,622 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func grafanaUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Grafana user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "alerting_enabled": { + Description: "Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.", + Optional: true, + Type: schema.TypeBool, + }, + "alerting_error_or_timeout": { + Description: "Default error or timeout setting for new alerting rules.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"alerting", "keep_state"}, false), + }, + "alerting_max_annotations_to_keep": { + Description: "Max number of alert annotations that Grafana stores. 
0 (default) keeps all alert annotations.", + Optional: true, + Type: schema.TypeInt, + }, + "alerting_nodata_or_nullvalues": { + Description: "Default value for 'no data or null values' for new alerting rules.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"alerting", "no_data", "keep_state", "ok"}, false), + }, + "allow_embedding": { + Description: "Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.", + Optional: true, + Type: schema.TypeBool, + }, + "auth_azuread": { + Description: "Azure AD OAuth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_domains": { + Description: "Allowed domains.", + Elem: &schema.Schema{ + Description: "Allowed domain.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "allowed_groups": { + Description: "Require users to belong to one of given groups.", + Elem: &schema.Schema{ + Description: "Group Object ID from Azure AD.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "auth_url": { + Description: "Authorization URL.", + Required: true, + Type: schema.TypeString, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + "token_url": { + Description: "Token URL.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "auth_basic_enabled": { + Description: "Enable or disable basic authentication form, used by Grafana built-in login.", + Optional: true, + Type: schema.TypeBool, + }, + "auth_generic_oauth": { + Description: "Generic OAuth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_domains": { + Description: "Allowed domains.", + Elem: &schema.Schema{ + Description: "Allowed domain.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "allowed_organizations": { + Description: "Require user to be member of one of the listed organizations.", + Elem: &schema.Schema{ + Description: "Allowed organization.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "api_url": { + Description: "API URL.", + Required: true, + Type: schema.TypeString, + }, + "auth_url": { + Description: "Authorization URL.", + Required: true, + Type: schema.TypeString, + }, + "auto_login": { + Description: "Allow users to bypass the login screen and automatically log in.", + Optional: true, + Type: schema.TypeBool, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + "name": { + Description: "Name of the OAuth integration.", + Optional: true, + Type: schema.TypeString, + }, + "scopes": { + Description: "OAuth scopes.", + Elem: &schema.Schema{ + Description: "OAuth scope.", + Type: schema.TypeString, + }, + MaxItems: 50, + 
Optional: true, + Type: schema.TypeSet, + }, + "token_url": { + Description: "Token URL.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "auth_github": { + Description: "Github Auth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_organizations": { + Description: "Require users to belong to one of given organizations.", + Elem: &schema.Schema{ + Description: "Organization name.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + "team_ids": { + Description: "Require users to belong to one of given team IDs.", + Elem: &schema.Schema{ + Description: "Team ID.", + Type: schema.TypeInt, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "auth_gitlab": { + Description: "GitLab Auth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_groups": { + Description: "Require users to belong to one of given groups.", + Elem: &schema.Schema{ + Description: "Group or subgroup name.", + Type: schema.TypeString, + }, + MaxItems: 50, + Required: true, + Type: schema.TypeSet, + }, + "api_url": { + Description: "API URL. This only needs to be set when using self hosted GitLab.", + Optional: true, + Type: schema.TypeString, + }, + "auth_url": { + Description: "Authorization URL. This only needs to be set when using self hosted GitLab.", + Optional: true, + Type: schema.TypeString, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + "token_url": { + Description: "Token URL. This only needs to be set when using self hosted GitLab.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "auth_google": { + Description: "Google Auth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_domains": { + Description: "Domains allowed to sign-in to this Grafana.", + Elem: &schema.Schema{ + Description: "Domain.", + Type: schema.TypeString, + }, + MaxItems: 64, + Required: true, + Type: schema.TypeSet, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "cookie_samesite": { + Description: "Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 
'lax' is the default value.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"lax", "strict", "none"}, false), + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "dashboard_previews_enabled": { + Description: "This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.", + Optional: true, + Type: schema.TypeBool, + }, + "dashboards_min_refresh_interval": { + Description: "Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.", + Optional: true, + Type: schema.TypeString, + }, + "dashboards_versions_to_keep": { + Description: "Dashboard versions to keep per dashboard.", + Optional: true, + Type: schema.TypeInt, + }, + "dataproxy_send_user_header": { + Description: "Send 'X-Grafana-User' header to data source.", + Optional: true, + Type: schema.TypeBool, + }, + "dataproxy_timeout": { + Description: "Timeout for data proxy requests in seconds.", + Optional: true, + Type: schema.TypeInt, + }, + "date_formats": { + Description: "Grafana date format specifications", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "default_timezone": { + Description: "Default time zone for user preferences. Value 'browser' uses browser local time zone.", + Optional: true, + Type: schema.TypeString, + }, + "full_date": { + Description: "Moment.js style format string for cases where full date is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_day": { + Description: "Moment.js style format string used when a time requiring day accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_hour": { + Description: "Moment.js style format string used when a time requiring hour accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_minute": { + Description: "Moment.js style format string used when a time requiring minute accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_month": { + Description: "Moment.js style format string used when a time requiring month accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_second": { + Description: "Moment.js style format string used when a time requiring second accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_year": { + Description: "Moment.js style format string used when a time requiring year accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "disable_gravatar": { + Description: "Set to true to disable gravatar. Defaults to false (gravatar is enabled).", + Optional: true, + Type: schema.TypeBool, + }, + "editors_can_admin": { + Description: "Editors can manage folders, teams and dashboards created by them.", + Optional: true, + Type: schema.TypeBool, + }, + "external_image_storage": { + Description: "External image store settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "access_key": { + Description: "S3 access key. 
Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.", + Required: true, + Type: schema.TypeString, + }, + "bucket_url": { + Description: "Bucket URL for S3.", + Required: true, + Type: schema.TypeString, + }, + "provider": { + Description: "Provider type.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"s3"}, false), + }, + "secret_key": { + Description: "S3 secret key.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "google_analytics_ua_id": { + Description: "Google Analytics ID.", + Optional: true, + Type: schema.TypeString, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "metrics_enabled": { + Description: "Enable Grafana /metrics endpoint.", + Optional: true, + Type: schema.TypeBool, + }, + "oauth_allow_insecure_email_lookup": { + Description: "Enforce user lookup based on email instead of the unique ID provided by the IdP.", + Optional: true, + Type: schema.TypeBool, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { + Description: "Allow clients to connect to grafana with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { + Description: "Enable grafana.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { + Description: "Allow clients to connect to grafana from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_basebackup_name": { + Description: "Name of the basebackup to restore in forked service.", + Optional: true, + Type: schema.TypeString, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "smtp_server": { + Description: "SMTP server settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "from_address": { + Description: "Address used for sending emails.", + Required: true, + Type: schema.TypeString, + }, + "from_name": { + Description: "Name used in outgoing emails, defaults to Grafana.", + Optional: true, + Type: schema.TypeString, + }, + "host": { + Description: "Server hostname or IP.", + Required: true, + Type: schema.TypeString, + }, + "password": { + Description: "Password for SMTP authentication.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "port": { + Description: "SMTP server port.", + Required: true, + Type: schema.TypeInt, + }, + "skip_verify": { + Description: "Skip verifying server certificate. Defaults to false.", + Optional: true, + Type: schema.TypeBool, + }, + "starttls_policy": { + Description: "Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"OpportunisticStartTLS", "MandatoryStartTLS", "NoStartTLS"}, false), + }, + "username": { + Description: "Username for SMTP authentication.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + "unified_alerting_enabled": { + Description: "Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.", + Optional: true, + Type: schema.TypeBool, + }, + "user_auto_assign_org": { + Description: "Auto-assign new users on signup to main organization. Defaults to false.", + Optional: true, + Type: schema.TypeBool, + }, + "user_auto_assign_org_role": { + Description: "Set role for new signups. 
Defaults to Viewer.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"Viewer", "Admin", "Editor"}, false), + }, + "viewers_can_edit": { + Description: "Users with view-only permission can edit but not save dashboards.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/influxdb.go b/internal/sdkprovider/userconfig/service/influxdb.go new file mode 100644 index 000000000..d8c65b9ee --- /dev/null +++ b/internal/sdkprovider/userconfig/service/influxdb.go @@ -0,0 +1,173 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func influxdbUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Influxdb user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "influxdb": { + Description: "influxdb.conf configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "log_queries_after": { + Description: "The maximum duration in seconds before a query is logged as a slow query. Setting this to 0 (the default) will never log slow queries.", + Optional: true, + Type: schema.TypeInt, + }, + "max_connection_limit": { + Description: "Maximum number of connections to InfluxDB. Setting this to 0 (default) means no limit. If using max_connection_limit, it is recommended to set the value to be large enough in order to not block clients unnecessarily.", + Optional: true, + Type: schema.TypeInt, + }, + "max_row_limit": { + Description: "The maximum number of rows returned in a non-chunked query. Setting this to 0 (the default) allows an unlimited number to be returned.", + Optional: true, + Type: schema.TypeInt, + }, + "max_select_buckets": { + Description: "The maximum number of `GROUP BY time()` buckets that can be processed in a query. Setting this to 0 (the default) allows an unlimited number to be processed.", + Optional: true, + Type: schema.TypeInt, + }, + "max_select_point": { + Description: "The maximum number of points that can be processed in a SELECT statement. Setting this to 0 (the default) allows an unlimited number to be processed.", + Optional: true, + Type: schema.TypeInt, + }, + "query_log_enabled": { + Description: "Whether queries should be logged before execution. May log sensitive data contained within a query.", + Optional: true, + Type: schema.TypeBool, + }, + "query_timeout": { + Description: "The maximum duration in seconds before a query is killed. Setting this to 0 (the default) will never kill slow queries.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { + Description: "Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { + Description: "Enable influxdb.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { + Description: "Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_basebackup_name": { + Description: "Name of the basebackup to restore in forked service.", + Optional: true, + Type: schema.TypeString, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/kafka.go b/internal/sdkprovider/userconfig/service/kafka.go new file mode 100644 index 000000000..fecc84993 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/kafka.go @@ -0,0 +1,669 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func kafkaUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Kafka user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "aiven_kafka_topic_messages": { + Description: "Allow access to read Kafka topic messages in the Aiven Console and REST API.", + Optional: true, + Type: schema.TypeBool, + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "kafka": { + Description: "Kafka broker configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "auto_create_topics_enable": { + Description: "Enable auto creation of topics.", + Optional: true, + Type: schema.TypeBool, + }, + "compression_type": { + Description: "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). 
It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "uncompressed", "producer"}, false), + }, + "connections_max_idle_ms": { + Description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.", + Optional: true, + Type: schema.TypeInt, + }, + "default_replication_factor": { + Description: "Replication factor for autocreated topics.", + Optional: true, + Type: schema.TypeInt, + }, + "group_initial_rebalance_delay_ms": { + Description: "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.", + Optional: true, + Type: schema.TypeInt, + }, + "group_max_session_timeout_ms": { + Description: "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", + Optional: true, + Type: schema.TypeInt, + }, + "group_min_session_timeout_ms": { + Description: "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", + Optional: true, + Type: schema.TypeInt, + }, + "log_cleaner_delete_retention_ms": { + Description: "How long delete records are retained.", + Optional: true, + Type: schema.TypeInt, + }, + "log_cleaner_max_compaction_lag_ms": { + Description: "The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.", + Optional: true, + Type: schema.TypeInt, + }, + "log_cleaner_min_cleanable_ratio": { + Description: "Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.", + Optional: true, + Type: schema.TypeFloat, + }, + "log_cleaner_min_compaction_lag_ms": { + Description: "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.", + Optional: true, + Type: schema.TypeInt, + }, + "log_cleanup_policy": { + Description: "The default cleanup policy for segments beyond the retention window.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"delete", "compact", "compact,delete"}, false), + }, + "log_flush_interval_messages": { + Description: "The number of messages accumulated on a log partition before messages are flushed to disk.", + Optional: true, + Type: schema.TypeInt, + }, + "log_flush_interval_ms": { + Description: "The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. 
If not set, the value in log.flush.scheduler.interval.ms is used.", + Optional: true, + Type: schema.TypeInt, + }, + "log_index_interval_bytes": { + Description: "The interval with which Kafka adds an entry to the offset index.", + Optional: true, + Type: schema.TypeInt, + }, + "log_index_size_max_bytes": { + Description: "The maximum size in bytes of the offset index.", + Optional: true, + Type: schema.TypeInt, + }, + "log_local_retention_bytes": { + Description: "The maximum size of local log segments that can grow for a partition before they become eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.", + Optional: true, + Type: schema.TypeInt, + }, + "log_local_retention_ms": { + Description: "The number of milliseconds to keep the local log segments before they become eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.", + Optional: true, + Type: schema.TypeInt, + }, + "log_message_downconversion_enable": { + Description: "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.", + Optional: true, + Type: schema.TypeBool, + }, + "log_message_timestamp_difference_max_ms": { + Description: "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.", + Optional: true, + Type: schema.TypeInt, + }, + "log_message_timestamp_type": { + Description: "Define whether the timestamp in the message is message create time or log append time.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"CreateTime", "LogAppendTime"}, false), + }, + "log_preallocate": { + Description: "Whether to preallocate the file when creating a new segment.", + Optional: true, + Type: schema.TypeBool, + }, + "log_retention_bytes": { + Description: "The maximum size of the log before deleting messages.", + Optional: true, + Type: schema.TypeInt, + }, + "log_retention_hours": { + Description: "The number of hours to keep a log file before deleting it.", + Optional: true, + Type: schema.TypeInt, + }, + "log_retention_ms": { + Description: "The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.", + Optional: true, + Type: schema.TypeInt, + }, + "log_roll_jitter_ms": { + Description: "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). 
If not set, the value in log.roll.jitter.hours is used.", + Optional: true, + Type: schema.TypeInt, + }, + "log_roll_ms": { + Description: "The maximum time before a new log segment is rolled out (in milliseconds).", + Optional: true, + Type: schema.TypeInt, + }, + "log_segment_bytes": { + Description: "The maximum size of a single log file.", + Optional: true, + Type: schema.TypeInt, + }, + "log_segment_delete_delay_ms": { + Description: "The amount of time to wait before deleting a file from the filesystem.", + Optional: true, + Type: schema.TypeInt, + }, + "max_connections_per_ip": { + Description: "The maximum number of connections allowed from each IP address (defaults to 2147483647).", + Optional: true, + Type: schema.TypeInt, + }, + "max_incremental_fetch_session_cache_slots": { + Description: "The maximum number of incremental fetch sessions that the broker will maintain.", + Optional: true, + Type: schema.TypeInt, + }, + "message_max_bytes": { + Description: "The maximum size of a message that the server can receive.", + Optional: true, + Type: schema.TypeInt, + }, + "min_insync_replicas": { + Description: "When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.", + Optional: true, + Type: schema.TypeInt, + }, + "num_partitions": { + Description: "Number of partitions for autocreated topics.", + Optional: true, + Type: schema.TypeInt, + }, + "offsets_retention_minutes": { + Description: "Log retention window in minutes for offsets topic.", + Optional: true, + Type: schema.TypeInt, + }, + "producer_purgatory_purge_interval_requests": { + Description: "The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).", + Optional: true, + Type: schema.TypeInt, + }, + "replica_fetch_max_bytes": { + Description: "The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.", + Optional: true, + Type: schema.TypeInt, + }, + "replica_fetch_response_max_bytes": { + Description: "Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.", + Optional: true, + Type: schema.TypeInt, + }, + "sasl_oauthbearer_expected_audience": { + Description: "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_expected_issuer": { + Description: "Optional setting for the broker to use to verify that the JWT was created by the expected issuer.", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_jwks_endpoint_url": { + Description: "OIDC JWKS endpoint URL. By setting this, SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_sub_claim_name": { + Description: "Name of the scope from which to extract the subject claim from the JWT. 
Defaults to sub.", + Optional: true, + Type: schema.TypeString, + }, + "socket_request_max_bytes": { + Description: "The maximum number of bytes in a socket request (defaults to 104857600).", + Optional: true, + Type: schema.TypeInt, + }, + "transaction_remove_expired_transaction_cleanup_interval_ms": { + Description: "The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).", + Optional: true, + Type: schema.TypeInt, + }, + "transaction_state_log_segment_bytes": { + Description: "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "kafka_authentication_methods": { + Description: "Kafka authentication methods", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "certificate": { + Default: true, + Description: "Enable certificate/SSL authentication. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "sasl": { + Default: false, + Description: "Enable SASL authentication. The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "kafka_connect": { + Default: false, + Description: "Enable Kafka Connect service. The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect_config": { + Description: "Kafka Connect configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "connector_client_config_override_policy": { + Description: "Defines what client configurations can be overridden by the connector. Default is None.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), + }, + "consumer_auto_offset_reset": { + Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"earliest", "latest"}, false), + }, + "consumer_fetch_max_bytes": { + Description: "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_isolation_level": { + Description: "Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), + }, + "consumer_max_partition_fetch_bytes": { + Description: "Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_max_poll_interval_ms": { + Description: "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_max_poll_records": { + Description: "The maximum number of records returned in a single call to poll() (defaults to 500).", + Optional: true, + Type: schema.TypeInt, + }, + "offset_flush_interval_ms": { + Description: "The interval at which to try committing offsets for tasks (defaults to 60000).", + Optional: true, + Type: schema.TypeInt, + }, + "offset_flush_timeout_ms": { + Description: "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_batch_size": { + Description: "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_buffer_memory": { + Description: "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_compression_type": { + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + }, + "producer_linger_ms": { + Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", + Optional: true, + Type: schema.TypeInt, + }, + "producer_max_request_size": { + Description: "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", + Optional: true, + Type: schema.TypeInt, + }, + "scheduled_rebalance_max_delay_ms": { + Description: "The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.", + Optional: true, + Type: schema.TypeInt, + }, + "session_timeout_ms": { + Description: "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "kafka_rest": { + Default: false, + Description: "Enable Kafka-REST service. 
The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest_authorization": { + Description: "Enable authorization in Kafka-REST service.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest_config": { + Description: "Kafka REST configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "consumer_enable_auto_commit": { + Default: true, + Description: "If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "consumer_request_max_bytes": { + Default: 67108864, + Description: "Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_request_timeout_ms": { + Default: 1000, + Description: "The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.", + Optional: true, + Type: schema.TypeInt, + ValidateFunc: validation.IntInSlice([]int{1000, 15000, 30000}), + }, + "producer_acks": { + Default: "1", + Description: "The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"all", "-1", "0", "1"}, false), + }, + "producer_compression_type": { + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + }, + "producer_linger_ms": { + Default: 0, + Description: "Wait for up to the given delay to allow batching records together. The default value is `0`.", + Optional: true, + Type: schema.TypeInt, + }, + "producer_max_request_size": { + Default: 1048576, + Description: "The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.", + Optional: true, + Type: schema.TypeInt, + }, + "simpleconsumer_pool_size_max": { + Default: 25, + Description: "Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "kafka_version": { + Description: "Kafka major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"3.2", "3.3", "3.1", "3.4", "3.5", "3.6"}, false), + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "kafka": { + Description: "Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect": { + Description: "Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest": { + Description: "Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "schema_registry": { + Description: "Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "jolokia": { + Description: "Enable jolokia.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka": { + Description: "Enable kafka.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect": { + Description: "Enable kafka_connect.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest": { + Description: "Enable kafka_rest.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + "schema_registry": { + Description: "Enable schema_registry.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "kafka": { + Description: "Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect": { + Description: "Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest": { + Description: "Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "schema_registry": { + Description: "Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "schema_registry": { + Default: false, + Description: "Enable Schema-Registry service. 
The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + "schema_registry_config": { + Description: "Schema Registry configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "leader_eligibility": { + Description: "If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "topic_name": { + Description: "The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to `_schemas`.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + "tiered_storage": { + Description: "Tiered storage configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "enabled": { + Description: "Whether to enable the tiered storage functionality.", + Optional: true, + Type: schema.TypeBool, + }, + "local_cache": { + Deprecated: "This property is deprecated.", + Description: "Local cache configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"size": { + Deprecated: "This property is deprecated.", + Description: "Local cache size in bytes.", + Optional: true, + Type: schema.TypeInt, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/kafka_connect.go b/internal/sdkprovider/userconfig/service/kafka_connect.go new file mode 100644 index 000000000..0a2e70a2e --- /dev/null +++ b/internal/sdkprovider/userconfig/service/kafka_connect.go @@ -0,0 +1,227 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func kafkaConnectUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "KafkaConnect user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "kafka_connect": { + Description: "Kafka Connect configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "connector_client_config_override_policy": { + Description: "Defines what client configurations can be overridden by the connector. Default is None.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), + }, + "consumer_auto_offset_reset": { + Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"earliest", "latest"}, false), + }, + "consumer_fetch_max_bytes": { + Description: "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_isolation_level": { + Description: "Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), + }, + "consumer_max_partition_fetch_bytes": { + Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. 
.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_max_poll_interval_ms": { + Description: "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_max_poll_records": { + Description: "The maximum number of records returned in a single call to poll() (defaults to 500).", + Optional: true, + Type: schema.TypeInt, + }, + "offset_flush_interval_ms": { + Description: "The interval at which to try committing offsets for tasks (defaults to 60000).", + Optional: true, + Type: schema.TypeInt, + }, + "offset_flush_timeout_ms": { + Description: "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_batch_size": { + Description: "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_buffer_memory": { + Description: "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_compression_type": { + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + }, + "producer_linger_ms": { + Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", + Optional: true, + Type: schema.TypeInt, + }, + "producer_max_request_size": { + Description: "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", + Optional: true, + Type: schema.TypeInt, + }, + "scheduled_rebalance_max_delay_ms": { + Description: "The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. 
Defaults to 5 minutes.", + Optional: true, + Type: schema.TypeInt, + }, + "session_timeout_ms": { + Description: "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "kafka_connect": { + Description: "Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "jolokia": { + Description: "Enable jolokia.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect": { + Description: "Enable kafka_connect.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "kafka_connect": { + Description: "Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/kafka_mirrormaker.go b/internal/sdkprovider/userconfig/service/kafka_mirrormaker.go new file mode 100644 index 000000000..a238c9c4b --- /dev/null +++ b/internal/sdkprovider/userconfig/service/kafka_mirrormaker.go @@ -0,0 +1,134 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func kafkaMirrormakerUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "KafkaMirrormaker user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "kafka_mirrormaker": { + Description: "Kafka MirrorMaker configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "emit_checkpoints_enabled": { + Description: "Whether to emit consumer group offset checkpoints to target cluster periodically (default: true).", + Optional: true, + Type: schema.TypeBool, + }, + "emit_checkpoints_interval_seconds": { + Description: "Frequency at which consumer group offset checkpoints are emitted (default: 60, every minute).", + Optional: true, + Type: schema.TypeInt, + }, + "refresh_groups_enabled": { + Description: "Whether to periodically check for new consumer groups. Defaults to 'true'.", + Optional: true, + Type: schema.TypeBool, + }, + "refresh_groups_interval_seconds": { + Description: "Frequency of consumer group refresh in seconds. Defaults to 600 seconds (10 minutes).", + Optional: true, + Type: schema.TypeInt, + }, + "refresh_topics_enabled": { + Description: "Whether to periodically check for new topics and partitions. Defaults to 'true'.", + Optional: true, + Type: schema.TypeBool, + }, + "refresh_topics_interval_seconds": { + Description: "Frequency of topic and partitions refresh in seconds. 
Defaults to 600 seconds (10 minutes).", + Optional: true, + Type: schema.TypeInt, + }, + "sync_group_offsets_enabled": { + Description: "Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.", + Optional: true, + Type: schema.TypeBool, + }, + "sync_group_offsets_interval_seconds": { + Description: "Frequency at which consumer group offsets are synced (default: 60, every minute).", + Optional: true, + Type: schema.TypeInt, + }, + "sync_topic_configs_enabled": { + Description: "Whether to periodically configure remote topics to match their corresponding upstream topics.", + Optional: true, + Type: schema.TypeBool, + }, + "tasks_max_per_cpu": { + Default: 1, + Description: "'tasks.max' is set to this multiplied by the number of CPUs in the service. The default value is `1`.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/m3aggregator.go b/internal/sdkprovider/userconfig/service/m3aggregator.go new file mode 100644 index 000000000..e0b031016 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/m3aggregator.go @@ -0,0 +1,83 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func m3AggregatorUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "M3Aggregator user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "m3_version": { + Description: "M3 major version (deprecated, use m3aggregator_version).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), + }, + "m3aggregator_version": { + Description: "M3 major version (the minimum compatible version).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/m3db.go b/internal/sdkprovider/userconfig/service/m3db.go new file mode 100644 index 000000000..50bacb6d5 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/m3db.go @@ -0,0 +1,366 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func m3DbUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "M3Db user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "limits": { + Description: "M3 limits", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "max_recently_queried_series_blocks": { + Description: "The maximum number of blocks that can be read in a given lookback period.", + Optional: true, + Type: schema.TypeInt, + }, + "max_recently_queried_series_disk_bytes_read": { + Description: "The maximum number of disk bytes that can be read in a given lookback period.", + Optional: true, + Type: schema.TypeInt, + }, + "max_recently_queried_series_lookback": { + Description: "The lookback period for 'max_recently_queried_series_blocks' and 'max_recently_queried_series_disk_bytes_read'.", + Optional: true, + Type: schema.TypeString, + }, + "query_docs": { + Description: "The maximum number of docs fetched in single query.", + Optional: true, + Type: schema.TypeInt, + }, + "query_require_exhaustive": { + Description: "When query limits are exceeded, whether to return error or return partial results.", + Optional: true, + Type: schema.TypeBool, + }, + "query_series": { + Description: "The maximum number of series fetched in single query.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "m3": { + Description: "M3 specific configuration options", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"tag_options": { + Description: "M3 Tag Options", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_tag_name_duplicates": { + Description: "Allows for duplicate tags to appear on series (not allowed by default).", + Optional: true, + Type: schema.TypeBool, + }, + "allow_tag_value_empty": { + Description: "Allows for empty tags to appear on series (not allowed by default).", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "m3_version": { + Description: "M3 major version (deprecated, use m3db_version).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), + }, + "m3coordinator_enable_graphite_carbon_ingest": { + Description: "Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. 
The metrics are written to aggregated namespaces only.", + Optional: true, + Type: schema.TypeBool, + }, + "m3db_version": { + Description: "M3 major version (the minimum compatible version).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), + }, + "namespaces": { + Description: "List of M3 namespaces", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "name": { + Description: "The name of the namespace.", + Required: true, + Type: schema.TypeString, + }, + "options": { + Description: "Namespace options", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "retention_options": { + Description: "Retention options", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "block_data_expiry_duration": { + Description: "Controls how long we wait before expiring stale data.", + Optional: true, + Type: schema.TypeString, + }, + "blocksize_duration": { + Description: "Controls how long to keep a block in memory before flushing to a fileset on disk.", + Optional: true, + Type: schema.TypeString, + }, + "buffer_future_duration": { + Description: "Controls how far into the future writes to the namespace will be accepted.", + Optional: true, + Type: schema.TypeString, + }, + "buffer_past_duration": { + Description: "Controls how far into the past writes to the namespace will be accepted.", + Optional: true, + Type: schema.TypeString, + }, + "retention_period_duration": { + Description: "Controls the duration of time that M3DB will retain data for the namespace.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Required: true, + Type: schema.TypeList, + }, + "snapshot_enabled": { + Description: "Controls whether M3DB will create snapshot files for this namespace.", + Optional: true, + Type: schema.TypeBool, + }, + "writes_to_commitlog": { + Description: "Controls whether M3DB will include writes to this namespace in the commitlog.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "resolution": { + Description: "The resolution for an aggregated namespace.", + Optional: true, + Type: schema.TypeString, + }, + "type": { + Description: "The type of aggregation (aggregated/unaggregated).", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"aggregated", "unaggregated"}, false), + }, + }}, + MaxItems: 2147483647, + Optional: true, + Type: schema.TypeList, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"m3coordinator": { + Description: "Allow clients to connect to m3coordinator with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", +			ForceNew: true, +			Optional: true, +			Type:     schema.TypeString, +		}, +		"public_access": { +			Description: "Allow access to selected service ports from the public Internet", +			Elem: &schema.Resource{Schema: map[string]*schema.Schema{"m3coordinator": { +				Description: "Allow clients to connect to m3coordinator from the public internet for service nodes that are in a project VPC or another type of private network.", +				Optional:    true, +				Type:        schema.TypeBool, +			}}}, +			MaxItems: 1, +			Optional: true, +			Type:     schema.TypeList, +		}, +		"rules": { +			Description: "M3 rules", +			Elem: &schema.Resource{Schema: map[string]*schema.Schema{"mapping": { +				Description: "List of M3 mapping rules", +				Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +					"aggregations": { +						Description: "List of aggregations to be applied.", +						Elem: &schema.Schema{ +							Description:  "Aggregation to be applied.", +							Type:         schema.TypeString, +							ValidateFunc: validation.StringInSlice([]string{"Count", "Last", "Max", "Mean", "Median", "Min", "P10", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "P90", "P95", "P99", "P999", "P9999", "Stdev", "Sum", "SumSq"}, false), +						}, +						MaxItems: 10, +						Optional: true, +						Type:     schema.TypeSet, +					}, +					"drop": { +						Description: "Only store the derived metric (as specified in the roll-up rules), if any.", +						Optional:    true, +						Type:        schema.TypeBool, +					}, +					"filter": { +						Description: "Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For values, ! can be used at the start of a value for negation, and multiple filters can be supplied using space as separator.", +						Required:    true, +						Type:        schema.TypeString, +					}, +					"name": { +						Description: "The (optional) name of the rule.", +						Optional:    true, +						Type:        schema.TypeString, +					}, +					"namespaces": { +						Deprecated:  "Deprecated. Use `namespaces_string` instead.", +						Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is the target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces at all will result in an error. Filter the namespace by glob (=wildcards).", +						Elem: &schema.Schema{ +							Description: "Filter the namespace by glob (=wildcards).", +							Type:        schema.TypeString, +						}, +						MaxItems: 10, +						Optional: true, +						Type:     schema.TypeSet, +					}, +					"namespaces_object": { +						Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is the target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces at all will result in an error. Filter the namespace by exact match of retention period and resolution.", +						Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +							"resolution": { +								Description: "The resolution for the matching namespace.", +								Required:    true, +								Type:        schema.TypeString, +							}, +							"retention": { +								Description: "The retention period of the matching namespace.", +								Optional:    true, +								Type:        schema.TypeString, +							}, +						}}, +						MaxItems: 10, +						Optional: true, +						Type:     schema.TypeList, +					}, +					"namespaces_string": { +						Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is the target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces at all will result in an error. 
Filter the namespace by glob (=wildcards).", + Elem: &schema.Schema{ + Description: "Filter the namespace by glob (=wildcards).", + Type: schema.TypeString, + }, + MaxItems: 10, + Optional: true, + Type: schema.TypeSet, + }, + "tags": { + Description: "List of tags to be appended to matching metrics", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "name": { + Description: "Name of the tag.", + Required: true, + Type: schema.TypeString, + }, + "value": { + Description: "Value of the tag.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 10, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 10, + Optional: true, + Type: schema.TypeList, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/mysql.go b/internal/sdkprovider/userconfig/service/mysql.go new file mode 100644 index 000000000..7e28fca65 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/mysql.go @@ -0,0 +1,407 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func mysqlUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Mysql user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "admin_password": { + Description: "Custom password for admin user. Defaults to random string. This must be set only when a new service is being created.", + ForceNew: true, + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "admin_username": { + Description: "Custom username for admin user. This must be set only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "backup_hour": { + Description: "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "backup_minute": { + Description: "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "binlog_retention_period": { + Description: "The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + Optional: true, + Type: schema.TypeInt, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "migration": { + Description: "Migrate data from existing server", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "dbname": { + Description: "Database name for bootstrapping the initial connection.", + Optional: true, + Type: schema.TypeString, + }, + "host": { + Description: "Hostname or IP address of the server where to migrate data from.", + Required: true, + Type: schema.TypeString, + }, + "ignore_dbs": { + Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", + Optional: true, + Type: schema.TypeString, + }, + "method": { + Description: "The migration method to be used (currently supported only by Redis, MySQL and PostgreSQL service types).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), + }, + "password": { + Description: "Password for authentication with the server where to migrate data from.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "port": { + Description: "Port number of the server where to migrate data from.", + Required: true, + Type: schema.TypeInt, + }, + "ssl": { + Default: true, + Description: "The server where to migrate data from is secured with SSL. 
The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "username": { + Description: "User name for authentication with the server where to migrate data from.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "mysql": { + Description: "mysql.conf configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "connect_timeout": { + Description: "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake.", + Optional: true, + Type: schema.TypeInt, + }, + "default_time_zone": { + Description: "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + Optional: true, + Type: schema.TypeString, + }, + "group_concat_max_len": { + Description: "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + Optional: true, + Type: schema.TypeInt, + }, + "information_schema_stats_expiry": { + Description: "The time, in seconds, before cached statistics expire.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_change_buffer_max_size": { + Description: "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_flush_neighbors": { + Description: "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_ft_min_token_size": { + Description: "Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_ft_server_stopword_table": { + Description: "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + Optional: true, + Type: schema.TypeString, + }, + "innodb_lock_wait_timeout": { + Description: "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_log_buffer_size": { + Description: "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_online_alter_log_max_size": { + Description: "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_print_all_deadlocks": { + Description: "When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.", + Optional: true, + Type: schema.TypeBool, + }, + "innodb_read_io_threads": { + Description: "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_rollback_on_timeout": { + Description: "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. 
Changing this parameter will lead to a restart of the MySQL service.", +					Optional: true, +					Type:     schema.TypeBool, +				}, +				"innodb_thread_concurrency": { +					Description: "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"innodb_write_io_threads": { +					Description: "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"interactive_timeout": { +					Description: "The number of seconds the server waits for activity on an interactive connection before closing it.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"internal_tmp_mem_storage_engine": { +					Description:  "The storage engine for in-memory internal temporary tables.", +					Optional:     true, +					Type:         schema.TypeString, +					ValidateFunc: validation.StringInSlice([]string{"TempTable", "MEMORY"}, false), +				}, +				"long_query_time": { +					Description: "An SQL statement is written to the slow query log if it takes more than long_query_time seconds to execute. Default is 10s.", +					Optional:    true, +					Type:        schema.TypeFloat, +				}, +				"max_allowed_packet": { +					Description: "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"max_heap_table_size": { +					Description: "Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"net_buffer_length": { +					Description: "Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"net_read_timeout": { +					Description: "The number of seconds to wait for more data from a connection before aborting the read.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"net_write_timeout": { +					Description: "The number of seconds to wait for a block to be written to a connection before aborting the write.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"slow_query_log": { +					Description: "The slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off.", +					Optional:    true, +					Type:        schema.TypeBool, +				}, +				"sort_buffer_size": { +					Description: "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K).", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"sql_mode": { +					Description: "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field, the Aiven default SQL mode (strict, SQL standard compliant) will be assigned.", +					Optional:    true, +					Type:        schema.TypeString, +				}, +				"sql_require_primary_key": { +					Description: "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", +					Optional:    true, +					Type:        schema.TypeBool, +				},
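+				// NOTE: this whole user config block is wired to diff.SuppressUnchanged (see the
+				// DiffSuppressFunc at the top of this file). As an illustration only — not the actual
+				// implementation in internal/sdkprovider/userconfig/diff — a suppressor with the SDK v2
+				// schema.SchemaDiffSuppressFunc signature could look like:
+				//
+				//	func suppressEmptyDiff(k, oldValue, newValue string, d *schema.ResourceData) bool {
+				//		// Report "no change" when the only difference is an unset value on one side,
+				//		// so omitted defaults do not show up as spurious diffs in `terraform plan`.
+				//		return oldValue == newValue || (newValue == "" && oldValue != "")
+				//	}
+				"tmp_table_size": { +					Description: "Limits the size of internal in-memory tables. Also set max_heap_table_size. 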
Default is 16777216 (16M).", + Optional: true, + Type: schema.TypeInt, + }, + "wait_timeout": { + Description: "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "mysql_version": { + Description: "MySQL major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"8"}, false), + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "mysql": { + Description: "Allow clients to connect to mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "mysqlx": { + Description: "Allow clients to connect to mysqlx with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "mysql": { + Description: "Enable mysql.", + Optional: true, + Type: schema.TypeBool, + }, + "mysqlx": { + Description: "Enable mysqlx.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "mysql": { + Description: "Allow clients to connect to mysql from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "mysqlx": { + Description: "Allow clients to connect to mysqlx from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_target_time": { + Description: "Recovery target time when forking a service. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/opensearch.go b/internal/sdkprovider/userconfig/service/opensearch.go new file mode 100644 index 000000000..327ada9c9 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/opensearch.go @@ -0,0 +1,674 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func opensearchUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Opensearch user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "disable_replication_factor_adjustment": { + Description: "Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.", + Optional: true, + Type: schema.TypeBool, + }, + "index_patterns": { + Description: "Index patterns", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "max_index_count": { + Description: "Maximum number of indexes to keep.", + Required: true, + Type: schema.TypeInt, + }, + "pattern": { + Description: "fnmatch pattern.", + Required: true, + Type: schema.TypeString, + }, + "sorting_algorithm": { + Default: "creation_date", + Description: "Deletion sorting algorithm. The default value is `creation_date`.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"alphabetical", "creation_date"}, false), + }, + }}, + MaxItems: 512, + Optional: true, + Type: schema.TypeList, + }, + "index_template": { + Description: "Template settings for all new indexes", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "mapping_nested_objects_limit": { + Description: "The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps to prevent out of memory errors when a document contains too many nested objects. Default is 10000.", + Optional: true, + Type: schema.TypeInt, + }, + "number_of_replicas": { + Description: "The number of replicas each primary shard has.", + Optional: true, + Type: schema.TypeInt, + }, + "number_of_shards": { + Description: "The number of primary shards that an index should have.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", +			Elem: &schema.Schema{ +				Description: "CIDR address block, either as a string, or in a dict with an optional description field.", +				Type:        schema.TypeString, +			}, +			MaxItems: 1024, +			Optional: true, +			Type:     schema.TypeSet, +		}, +		"ip_filter_object": { +			Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", +			Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +				"description": { +					Description: "Description for IP filter list entry.", +					Optional:    true, +					Type:        schema.TypeString, +				}, +				"network": { +					Description: "CIDR address block.", +					Required:    true, +					Type:        schema.TypeString, +				}, +			}}, +			MaxItems: 1024, +			Optional: true, +			Type:     schema.TypeList, +		}, +		"ip_filter_string": { +			Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", +			Elem: &schema.Schema{ +				Description: "CIDR address block, either as a string, or in a dict with an optional description field.", +				Type:        schema.TypeString, +			}, +			MaxItems: 1024, +			Optional: true, +			Type:     schema.TypeSet, +		}, +		"keep_index_refresh_interval": { +			Description: "Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If this doesn't fit your case, you can disable it by setting this flag to true.", +			Optional:    true, +			Type:        schema.TypeBool, +		}, +		"max_index_count": { +			Default:     0, +			Description: "Use index_patterns instead. The default value is `0`.", +			Optional:    true, +			Type:        schema.TypeInt, +		}, +		"openid": { +			Description: "OpenSearch OpenID Connect Configuration", +			Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +				"client_id": { +					Description: "The ID of the OpenID Connect client configured in your IdP. Required.", +					Required:    true, +					Type:        schema.TypeString, +				}, +				"client_secret": { +					Description: "The client secret of the OpenID Connect client configured in your IdP. Required.", +					Required:    true, +					Type:        schema.TypeString, +				}, +				"connect_url": { +					Description: "The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings.", +					Required:    true, +					Type:        schema.TypeString, +				}, +				"enabled": { +					Default:     true, +					Description: "Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. The default value is `true`.", +					Optional:    true, +					Type:        schema.TypeBool, +				}, +				"header": { +					Default:     "Authorization", +					Description: "HTTP header name of the JWT token. Optional. Default is Authorization. The default value is `Authorization`.", +					Optional:    true, +					Type:        schema.TypeString, +				}, +				"jwt_header": { +					Description: "The HTTP header that stores the token. Typically the Authorization header with the Bearer schema: Authorization: Bearer <token>. Optional. Default is Authorization.", +					Optional:    true, +					Type:        schema.TypeString, +				}, +				"jwt_url_parameter": { +					Description: "If the token is not transmitted in the HTTP header, but as a URL parameter, define the name of the parameter here. Optional.", +					Optional:    true, +					Type:        schema.TypeString, +				}, +				"refresh_rate_limit_count": { +					Default:     10, +					Description: "The maximum number of unknown key IDs in the time frame. Default is 10. Optional. The default value is `10`.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"refresh_rate_limit_time_window_ms": { +					Default:     10000, +					Description: "The time frame to use when checking the maximum number of unknown key IDs, in milliseconds. 
Optional. Default is 10000 (10 seconds). The default value is `10000`.", +					Optional: true, +					Type:     schema.TypeInt, +				}, +				"roles_key": { +					Description: "The key in the JSON payload that stores the user’s roles. The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT.", +					Optional:    true, +					Type:        schema.TypeString, +				}, +				"scope": { +					Description: "The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone.", +					Optional:    true, +					Type:        schema.TypeString, +				}, +				"subject_key": { +					Description: "The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional.", +					Optional:    true, +					Type:        schema.TypeString, +				}, +			}}, +			MaxItems: 1, +			Optional: true, +			Type:     schema.TypeList, +		}, +		"opensearch": { +			Description: "OpenSearch settings", +			Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +				"action_auto_create_index_enabled": { +					Description: "Explicitly allow or block automatic creation of indices. Defaults to true.", +					Optional:    true, +					Type:        schema.TypeBool, +				}, +				"action_destructive_requires_name": { +					Description: "Require explicit index names when deleting.", +					Optional:    true, +					Type:        schema.TypeBool, +				}, +				"auth_failure_listeners": { +					Description: "Opensearch Security Plugin Settings", +					Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +						"internal_authentication_backend_limiting": { +							Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +								"allowed_tries": { +									Description: "The number of login attempts allowed before login is blocked.", +									Optional:    true, +									Type:        schema.TypeInt, +								}, +								"authentication_backend": { +									Description:  "internal_authentication_backend_limiting.authentication_backend.", +									Optional:     true, +									Type:         schema.TypeString, +									ValidateFunc: validation.StringInSlice([]string{"internal"}, false), +								}, +								"block_expiry_seconds": { +									Description: "The duration of time that login remains blocked after a failed login.", +									Optional:    true, +									Type:        schema.TypeInt, +								}, +								"max_blocked_clients": { +									Description: "internal_authentication_backend_limiting.max_blocked_clients.", +									Optional:    true, +									Type:        schema.TypeInt, +								}, +								"max_tracked_clients": { +									Description: "The maximum number of tracked IP addresses that have failed login.", +									Optional:    true, +									Type:        schema.TypeInt, +								}, +								"time_window_seconds": { +									Description: "The window of time in which the value for `allowed_tries` is enforced.", +									Optional:    true, +									Type:        schema.TypeInt, +								}, +								"type": { +									Description:  "internal_authentication_backend_limiting.type.", +									Optional:     true, +									Type:         schema.TypeString, +									ValidateFunc: validation.StringInSlice([]string{"username"}, false), +								}, +							}}, +							MaxItems: 1, +							Optional: true, +							Type:     schema.TypeList, +						}, +						"ip_rate_limiting": { +							Description: "IP address rate limiting settings", +							Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +								"allowed_tries": { +									Description: "The number of login attempts allowed before login is blocked.", +									Optional:    true, +									Type:        schema.TypeInt, +								}, +								"block_expiry_seconds": { +									Description: "The duration of time that login remains blocked after a failed login.", +									Optional:    true, +									Type:        schema.TypeInt, +								}, +								"max_blocked_clients": { +									Description: "The maximum number of blocked IP addresses.", +									Optional:    true, +									Type:        schema.TypeInt, +								},
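+								// NOTE: blocks like `openid` above use MaxItems: 1 with schema.TypeList, so in
+								// state they are one-element lists. A minimal sketch of reading such a block
+								// (the helper name and attribute path are illustrative, not part of this file),
+								// assuming Terraform Plugin SDK v2:
+								//
+								//	func openIDClientID(d *schema.ResourceData) (string, bool) {
+								//		v, ok := d.GetOk("opensearch_user_config.0.openid")
+								//		if !ok || len(v.([]interface{})) == 0 {
+								//			return "", false
+								//		}
+								//		block := v.([]interface{})[0].(map[string]interface{}) // the single list element
+								//		clientID, _ := block["client_id"].(string)
+								//		return clientID, clientID != ""
+								//	}
+								"max_tracked_clients": { +									Description: "The maximum number of tracked IP addresses 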
that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"ip"}, false), + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "cluster_max_shards_per_node": { + Description: "Controls the number of shards allowed in the cluster per data node.", + Optional: true, + Type: schema.TypeInt, + }, + "cluster_routing_allocation_node_concurrent_recoveries": { + Description: "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2.", + Optional: true, + Type: schema.TypeInt, + }, + "email_sender_name": { + Description: "Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore.", + Optional: true, + Type: schema.TypeString, + }, + "email_sender_password": { + Description: "Sender password for Opensearch alerts to authenticate with SMTP server.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "email_sender_username": { + Description: "Sender username for Opensearch alerts.", + Optional: true, + Type: schema.TypeString, + }, + "http_max_content_length": { + Description: "Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.", + Optional: true, + Type: schema.TypeInt, + }, + "http_max_header_size": { + Description: "The max size of allowed headers, in bytes.", + Optional: true, + Type: schema.TypeInt, + }, + "http_max_initial_line_length": { + Description: "The max length of an HTTP URL, in bytes.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_fielddata_cache_size": { + Description: "Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_memory_index_buffer_size": { + Description: "Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_queries_cache_size": { + Description: "Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_query_bool_max_clause_count": { + Description: "Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_recovery_max_bytes_per_sec": { + Description: "Limits total inbound and outbound recovery traffic for each node. 
Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_recovery_max_concurrent_file_chunks": { + Description: "Number of file chunks sent in parallel for each recovery. Defaults to 2.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_enabled": { + Default: true, + Description: "Specifies whether ISM is enabled or not. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "ism_history_enabled": { + Default: true, + Description: "Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "ism_history_max_age": { + Default: 24, + Description: "The maximum age before rolling over the audit history index in hours. The default value is `24`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_max_docs": { + Default: 2500000, + Description: "The maximum number of documents before rolling over the audit history index. The default value is `2500000`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_rollover_check_period": { + Default: 8, + Description: "The time between rollover checks for the audit history index in hours. The default value is `8`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_rollover_retention_period": { + Default: 30, + Description: "How long audit history indices are kept in days. The default value is `30`.", + Optional: true, + Type: schema.TypeInt, + }, + "override_main_response_version": { + Description: "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.", + Optional: true, + Type: schema.TypeBool, + }, + "reindex_remote_whitelist": { + Description: "Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.", + Elem: &schema.Schema{ + Description: "Address (hostname:port or IP:port).", + Type: schema.TypeString, + }, + MaxItems: 32, + Optional: true, + Type: schema.TypeSet, + }, + "script_max_compilations_rate": { + Description: "Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context.", + Optional: true, + Type: schema.TypeString, + }, + "search_max_buckets": { + Description: "Maximum number of aggregation buckets allowed in a single response. OpenSearch default value is used when this is not defined.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_analyze_queue_size": { + Description: "Size for the thread pool queue. See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_analyze_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_force_merge_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_get_queue_size": { + Description: "Size for the thread pool queue. 
See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_get_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_search_queue_size": { + Description: "Size for the thread pool queue. See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_search_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_search_throttled_queue_size": { + Description: "Size for the thread pool queue. See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_search_throttled_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_write_queue_size": { + Description: "Size for the thread pool queue. See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_write_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "opensearch_dashboards": { + Description: "OpenSearch Dashboards settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "enabled": { + Default: true, + Description: "Enable or disable OpenSearch Dashboards. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "max_old_space_size": { + Default: 128, + Description: "Limits the maximum amount of memory (in MiB) the OpenSearch Dashboards process can use. This sets the max_old_space_size option of the nodejs running the OpenSearch Dashboards. Note: the memory reserved by OpenSearch Dashboards is not available for OpenSearch. The default value is `128`.", + Optional: true, + Type: schema.TypeInt, + }, + "opensearch_request_timeout": { + Default: 30000, + Description: "Timeout in milliseconds for requests made by OpenSearch Dashboards towards OpenSearch. The default value is `30000`.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "opensearch_version": { + Description: "OpenSearch major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1", "2"}, false), + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "opensearch": { + Description: "Allow clients to connect to opensearch with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "opensearch_dashboards": { + Description: "Allow clients to connect to opensearch_dashboards with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "opensearch": { + Description: "Enable opensearch.", + Optional: true, + Type: schema.TypeBool, + }, + "opensearch_dashboards": { + Description: "Enable opensearch_dashboards.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "opensearch": { + Description: "Allow clients to connect to opensearch from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "opensearch_dashboards": { + Description: "Allow clients to connect to opensearch_dashboards from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_basebackup_name": { + Description: "Name of the basebackup to restore in forked service.", + Optional: true, + Type: schema.TypeString, + }, + "saml": { + Description: "OpenSearch SAML configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "enabled": { + Description: "Enables or disables SAML-based authentication for OpenSearch. When enabled, users can authenticate using SAML with an Identity Provider. The default value is `true`.", + Required: true, + Type: schema.TypeBool, + }, + "idp_entity_id": { + Description: "The unique identifier for the Identity Provider (IdP) entity that is used for SAML authentication. This value is typically provided by the IdP.", + Required: true, + Type: schema.TypeString, + }, + "idp_metadata_url": { + Description: "The URL of the SAML metadata for the Identity Provider (IdP). 
This is used to configure SAML-based authentication with the IdP.", + Required: true, + Type: schema.TypeString, + }, + "idp_pemtrustedcas_content": { + Description: "This parameter specifies the PEM-encoded root certificate authority (CA) content for the SAML identity provider (IdP) server verification. The root CA content is used to verify the SSL/TLS certificate presented by the server.", + Optional: true, + Type: schema.TypeString, + }, + "roles_key": { + Description: "Optional. Specifies the attribute in the SAML response where role information is stored, if available. Role attributes are not required for SAML authentication, but can be included in SAML assertions by most Identity Providers (IdPs) to determine user access levels or permissions.", + Optional: true, + Type: schema.TypeString, + }, + "sp_entity_id": { + Description: "The unique identifier for the Service Provider (SP) entity that is used for SAML authentication. This value is typically provided by the SP.", + Required: true, + Type: schema.TypeString, + }, + "subject_key": { + Description: "Optional. Specifies the attribute in the SAML response where the subject identifier is stored. If not configured, the NameID attribute is used by default.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/pg.go b/internal/sdkprovider/userconfig/service/pg.go new file mode 100644 index 000000000..e33fc7f00 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/pg.go @@ -0,0 +1,630 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func pgUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Pg user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "admin_password": { + Description: "Custom password for admin user. Defaults to random string. This must be set only when a new service is being created.", + ForceNew: true, + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "admin_username": { + Description: "Custom username for admin user. This must be set only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "backup_hour": { + Description: "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "backup_minute": { + Description: "The minute of an hour when backup for the service is started. 
New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "enable_ipv6": { + Description: "Register AAAA DNS records for the service, and allow IPv6 packets to service ports.", + Optional: true, + Type: schema.TypeBool, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "migration": { + Description: "Migrate data from existing server", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "dbname": { + Description: "Database name for bootstrapping the initial connection.", + Optional: true, + Type: schema.TypeString, + }, + "host": { + Description: "Hostname or IP address of the server where to migrate data from.", + Required: true, + Type: schema.TypeString, + }, + "ignore_dbs": { + Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", + Optional: true, + Type: schema.TypeString, + }, + "method": { + Description: "The migration method to be used (currently supported only by Redis, MySQL and PostgreSQL service types).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), + }, + "password": { + Description: "Password for authentication with the server where to migrate data from.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "port": { + Description: "Port number of the server where to migrate data from.", + Required: true, + Type: schema.TypeInt, + }, + "ssl": { + Default: true, + Description: "The server where to migrate data from is secured with SSL. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "username": { + Description: "User name for authentication with the server where to migrate data from.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "pg": { + Description: "postgresql.conf configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "autovacuum_analyze_scale_factor": { + Description: "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. 
The default is 0.2 (20% of table size).", +					Optional: true, +					Type:     schema.TypeFloat, +				}, +				"autovacuum_analyze_threshold": { +					Description: "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"autovacuum_freeze_max_age": { +					Description: "Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"autovacuum_max_workers": { +					Description: "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"autovacuum_naptime": { +					Description: "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"autovacuum_vacuum_cost_delay": { +					Description: "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"autovacuum_vacuum_cost_limit": { +					Description: "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"autovacuum_vacuum_scale_factor": { +					Description: "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size).", +					Optional:    true, +					Type:        schema.TypeFloat, +				}, +				"autovacuum_vacuum_threshold": { +					Description: "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"bgwriter_delay": { +					Description: "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"bgwriter_flush_after": { +					Description: "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. A setting of 0 disables forced writeback.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"bgwriter_lru_maxpages": { +					Description: "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.", +					Optional:    true, +					Type:        schema.TypeInt, +				}, +				"bgwriter_lru_multiplier": { +					Description: "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. 
Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", + Optional: true, + Type: schema.TypeFloat, + }, + "deadlock_timeout": { + Description: "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + Optional: true, + Type: schema.TypeInt, + }, + "default_toast_compression": { + Description: "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"lz4", "pglz"}, false), + }, + "idle_in_transaction_session_timeout": { + Description: "Time out sessions with open transactions after this number of milliseconds.", + Optional: true, + Type: schema.TypeInt, + }, + "jit": { + Description: "Controls system-wide use of Just-in-Time Compilation (JIT).", + Optional: true, + Type: schema.TypeBool, + }, + "log_autovacuum_min_duration": { + Description: "Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.", + Optional: true, + Type: schema.TypeInt, + }, + "log_error_verbosity": { + Description: "Controls the amount of detail written in the server log for each message that is logged.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"TERSE", "DEFAULT", "VERBOSE"}, false), + }, + "log_line_prefix": { + Description: "Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"'pid=%p,user=%u,db=%d,app=%a,client=%h '", "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", "'%m [%p] %q[user=%u,db=%d,app=%a] '"}, false), + }, + "log_min_duration_statement": { + Description: "Log statements that take more than this number of milliseconds to run, -1 disables.", + Optional: true, + Type: schema.TypeInt, + }, + "log_temp_files": { + Description: "Log statements for each temporary file created larger than this number of kilobytes, -1 disables.", + Optional: true, + Type: schema.TypeInt, + }, + "max_files_per_process": { + Description: "PostgreSQL maximum number of files that can be open per process.", + Optional: true, + Type: schema.TypeInt, + }, + "max_locks_per_transaction": { + Description: "PostgreSQL maximum locks per transaction.", + Optional: true, + Type: schema.TypeInt, + }, + "max_logical_replication_workers": { + Description: "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers).", + Optional: true, + Type: schema.TypeInt, + }, + "max_parallel_workers": { + Description: "Sets the maximum number of workers that the system can support for parallel queries.", + Optional: true, + Type: schema.TypeInt, + }, + "max_parallel_workers_per_gather": { + Description: "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node.", + Optional: true, + Type: schema.TypeInt, + }, + "max_pred_locks_per_transaction": { + Description: "PostgreSQL maximum predicate locks per transaction.", + Optional: true, + Type: schema.TypeInt, + }, + "max_prepared_transactions": { + Description: "PostgreSQL maximum prepared transactions.", + Optional: true, + Type: 
schema.TypeInt, + }, + "max_replication_slots": { + Description: "PostgreSQL maximum replication slots.", + Optional: true, + Type: schema.TypeInt, + }, + "max_slot_wal_keep_size": { + Description: "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). wal_keep_size minimum WAL size setting takes precedence over this.", + Optional: true, + Type: schema.TypeInt, + }, + "max_stack_depth": { + Description: "Maximum depth of the stack in bytes.", + Optional: true, + Type: schema.TypeInt, + }, + "max_standby_archive_delay": { + Description: "Max standby archive delay in milliseconds.", + Optional: true, + Type: schema.TypeInt, + }, + "max_standby_streaming_delay": { + Description: "Max standby streaming delay in milliseconds.", + Optional: true, + Type: schema.TypeInt, + }, + "max_wal_senders": { + Description: "PostgreSQL maximum WAL senders.", + Optional: true, + Type: schema.TypeInt, + }, + "max_worker_processes": { + Description: "Sets the maximum number of background processes that the system can support.", + Optional: true, + Type: schema.TypeInt, + }, + "pg_partman_bgw__dot__interval": { + Description: "Sets the time interval to run pg_partman's scheduled tasks.", + Optional: true, + Type: schema.TypeInt, + }, + "pg_partman_bgw__dot__role": { + Description: "Controls which role to use for pg_partman's scheduled background tasks.", + Optional: true, + Type: schema.TypeString, + }, + "pg_stat_monitor__dot__pgsm_enable_query_plan": { + Description: "Enables or disables query plan monitoring.", + Optional: true, + Type: schema.TypeBool, + }, + "pg_stat_monitor__dot__pgsm_max_buckets": { + Description: "Sets the maximum number of buckets.", + Optional: true, + Type: schema.TypeInt, + }, + "pg_stat_statements__dot__track": { + Description: "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"all", "top", "none"}, false), + }, + "temp_file_limit": { + Description: "PostgreSQL temporary file limit in KiB, -1 for unlimited.", + Optional: true, + Type: schema.TypeInt, + }, + "timezone": { + Description: "PostgreSQL service timezone.", + Optional: true, + Type: schema.TypeString, + }, + "track_activity_query_size": { + Description: "Specifies the number of bytes reserved to track the currently executing command for each active session.", + Optional: true, + Type: schema.TypeInt, + }, + "track_commit_timestamp": { + Description: "Record commit time of transactions.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"off", "on"}, false), + }, + "track_functions": { + Description: "Enables tracking of function call counts and time used.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"all", "pl", "none"}, false), + }, + "track_io_timing": { + Description: "Enables timing of database I/O calls. 
This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"off", "on"}, false), + }, + "wal_sender_timeout": { + Description: "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", + Optional: true, + Type: schema.TypeInt, + }, + "wal_writer_delay": { + Description: "WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "pg_read_replica": { + Description: "Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).", + Optional: true, + Type: schema.TypeBool, + }, + "pg_service_to_fork_from": { + Description: "Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "pg_stat_monitor_enable": { + Default: false, + Description: "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted. When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + "pg_version": { + Description: "PostgreSQL major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"11", "12", "13", "14", "15", "10"}, false), + }, + "pgbouncer": { + Description: "PGBouncer connection pooling settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "autodb_idle_timeout": { + Description: "If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds).", + Optional: true, + Type: schema.TypeInt, + }, + "autodb_max_db_connections": { + Description: "Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.", + Optional: true, + Type: schema.TypeInt, + }, + "autodb_pool_mode": { + Description: "PGBouncer pool mode.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"session", "transaction", "statement"}, false), + }, + "autodb_pool_size": { + Description: "If non-zero then create automatically a pool of that size per user when a pool doesn't exist.", + Optional: true, + Type: schema.TypeInt, + }, + "ignore_startup_parameters": { + Description: "List of parameters to ignore when given in startup packet.", + Elem: &schema.Schema{ + Description: "Enum of parameters to ignore when given in startup packet.", + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"extra_float_digits", "search_path"}, false), + }, + MaxItems: 32, + Optional: true, + Type: schema.TypeSet, + }, + "min_pool_size": { + Description: "Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. 
The value is effectively capped at the pool size.", + Optional: true, + Type: schema.TypeInt, + }, + "server_idle_timeout": { + Description: "If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds).", + Optional: true, + Type: schema.TypeInt, + }, + "server_lifetime": { + Description: "The pooler will close an unused server connection that has been connected longer than this. (seconds).", + Optional: true, + Type: schema.TypeInt, + }, + "server_reset_query_always": { + Description: "Run server_reset_query (DISCARD ALL) in all pooling modes.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "pglookout": { + Description: "PGLookout settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"max_failover_replication_time_lag": { + Default: 60, + Description: "Number of seconds of master unavailability before triggering database failover to standby. The default value is `60`.", + Optional: true, + Type: schema.TypeInt, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "pg": { + Description: "Allow clients to connect to pg with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "pgbouncer": { + Description: "Allow clients to connect to pgbouncer with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "pg": { + Description: "Enable pg.", + Optional: true, + Type: schema.TypeBool, + }, + "pgbouncer": { + Description: "Enable pgbouncer.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "pg": { + Description: "Allow clients to connect to pg from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "pgbouncer": { + Description: "Allow clients to connect to pgbouncer from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_target_time": { + Description: "Recovery target time when forking a service. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "shared_buffers_percentage": { + Description: "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.", + Optional: true, + Type: schema.TypeFloat, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + "synchronous_replication": { + Description: "Synchronous replication type. Note that the service plan also needs to support synchronous replication.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"quorum", "off"}, false), + }, + "timescaledb": { + Description: "TimescaleDB extension configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"max_background_workers": { + Description: "The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time.", + Optional: true, + Type: schema.TypeInt, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "variant": { + Description: "Variant of the PostgreSQL service, may affect the features that are exposed by default.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"aiven", "timescale"}, false), + }, + "work_mem": { + Description: "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. 
Default is 1MB + 0.075% of total RAM (up to 32MB).", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/redis.go b/internal/sdkprovider/userconfig/service/redis.go new file mode 100644 index 000000000..76a737c39 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/redis.go @@ -0,0 +1,261 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" +) + +func redisUserConfig() *schema.Schema { + return &schema.Schema{ + Description: "Redis user configurable settings", + DiffSuppressFunc: diff.SuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "migration": { + Description: "Migrate data from existing server", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "dbname": { + Description: "Database name for bootstrapping the initial connection.", + Optional: true, + Type: schema.TypeString, + }, + "host": { + Description: "Hostname or IP address of the server where to migrate data from.", + Required: true, + Type: schema.TypeString, + }, + "ignore_dbs": { + Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", + Optional: true, + Type: schema.TypeString, + }, + "method": { + Description: "The migration method to be used (currently supported only by Redis, MySQL and PostgreSQL service types).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), + }, + "password": { + Description: "Password for authentication with the server where to migrate data from.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "port": { + Description: "Port number of the server where to migrate data from.", + Required: true, + Type: schema.TypeInt, + }, + "ssl": { + Default: true, + Description: "The server where to migrate data from is secured with SSL. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "username": { + Description: "User name for authentication with the server where to migrate data from.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "redis": { + Description: "Allow clients to connect to redis with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + "redis": { + Description: "Enable redis.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "redis": { + Description: "Allow clients to connect to redis from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_basebackup_name": { + Description: "Name of the basebackup to restore in forked service.", + Optional: true, + Type: schema.TypeString, + }, + "redis_acl_channels_default": { + Description: "Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"allchannels", "resetchannels"}, false), + }, + "redis_io_threads": { + Description: "Set Redis IO thread count. Changing this will cause a restart of the Redis service.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_lfu_decay_time": { + Default: 1, + Description: "LFU maxmemory-policy counter decay time in minutes. The default value is `1`.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_lfu_log_factor": { + Default: 10, + Description: "Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_maxmemory_policy": { + Default: "noeviction", + Description: "Redis maxmemory-policy. The default value is `noeviction`.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"noeviction", "allkeys-lru", "volatile-lru", "allkeys-random", "volatile-random", "volatile-ttl", "volatile-lfu", "allkeys-lfu"}, false), + }, + "redis_notify_keyspace_events": { + Description: "Set notify-keyspace-events option.", + Optional: true, + Type: schema.TypeString, + }, + "redis_number_of_databases": { + Description: "Set number of Redis databases. Changing this will cause a restart of the Redis service.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_persistence": { + Description: "When persistence is 'rdb', Redis does RDB dumps every 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"off", "rdb"}, false), + }, + "redis_pubsub_client_output_buffer_limit": { + Description: "Set output buffer limit for pub/sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. 
When setting the limit, be mindful of the available memory in the selected service plan.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_ssl": { + Default: true, + Description: "Require SSL to access Redis. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "redis_timeout": { + Default: 300, + Description: "Redis idle connection timeout in seconds. The default value is `300`.", + Optional: true, + Type: schema.TypeInt, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/service.go b/internal/sdkprovider/userconfig/service/service.go new file mode 100644 index 000000000..0f42fcbba --- /dev/null +++ b/internal/sdkprovider/userconfig/service/service.go @@ -0,0 +1,40 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +func GetUserConfig(kind string) *schema.Schema { + switch kind { + case "cassandra": + return cassandraUserConfig() + case "clickhouse": + return clickhouseUserConfig() + case "flink": + return flinkUserConfig() + case "grafana": + return grafanaUserConfig() + case "influxdb": + return influxdbUserConfig() + case "kafka": + return kafkaUserConfig() + case "kafka_connect": + return kafkaConnectUserConfig() + case "kafka_mirrormaker": + return kafkaMirrormakerUserConfig() + case "m3aggregator": + return m3AggregatorUserConfig() + case "m3db": + return m3DbUserConfig() + case "mysql": + return mysqlUserConfig() + case "opensearch": + return opensearchUserConfig() + case "pg": + return pgUserConfig() + case "redis": + return redisUserConfig() + default: + panic("unknown user config type: " + kind) + } +} diff --git a/main.go b/main.go index 952788618..df5035980 100644 --- a/main.go +++ b/main.go @@ -12,7 +12,7 @@ import ( ) //go:generate go test -tags userconfig ./internal/schemautil/userconfig -//go:generate go run ./ucgenerator/... --integrations clickhouse_kafka,clickhouse_postgresql,datadog,external_aws_cloudwatch_metrics,kafka_connect,kafka_logs,kafka_mirrormaker,logs,metrics +//go:generate go run ./ucgenerator/... --services cassandra,clickhouse,flink,grafana,influxdb,kafka,kafka_connect,kafka_mirrormaker,m3aggregator,m3db,mysql,opensearch,pg,redis // registryPrefix is the registry prefix for the provider. 
const registryPrefix = "registry.terraform.io/" diff --git a/ucgenerator/main.go b/ucgenerator/main.go index ef084010b..56f254a49 100644 --- a/ucgenerator/main.go +++ b/ucgenerator/main.go @@ -3,7 +3,6 @@ package main import ( "flag" "fmt" - "go/format" "log" "os" "path/filepath" @@ -18,17 +17,12 @@ import ( ) const ( - destPath = "./internal/plugin/service/userconfig/" - localPrefix = "github.com/aiven/terraform-provider-aiven" - importDiag = "github.com/hashicorp/terraform-plugin-framework/diag" - importTypes = "github.com/hashicorp/terraform-plugin-framework/types" - importAttr = "github.com/hashicorp/terraform-plugin-framework/attr" - importSchemautil = "github.com/aiven/terraform-provider-aiven/internal/schemautil" - importResourceSchema = "github.com/hashicorp/terraform-plugin-framework/resource/schema" - importDatasourceSchema = "github.com/hashicorp/terraform-plugin-framework/datasource/schema" - importSetValidator = "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" - importValidator = "github.com/hashicorp/terraform-plugin-framework/schema/validator" - codeGenerated = "Code generated by user config generator. DO NOT EDIT." + destPath = "./internal/sdkprovider/userconfig/" + localPrefix = "github.com/aiven/terraform-provider-aiven" + importSchema = "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + importDiff = "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff" + importValidation = "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + codeGenerated = "Code generated by user config generator. DO NOT EDIT." ) func main() { @@ -57,6 +51,7 @@ func main() { } func generate(kind string, data []byte, keys []string) error { + slices.Sort(keys) var root map[string]*object err := yaml.Unmarshal(data, &root) @@ -64,477 +59,205 @@ func generate(kind string, data []byte, keys []string) error { return err } - for key, o := range root { - if !slices.Contains(keys, key) { - continue + dirPath := filepath.Join(destPath, kind) + err = os.MkdirAll(dirPath, os.ModePerm) + if err != nil { + return err + } + + // Fixes imports order + imports.LocalPrefix = localPrefix + + doneKeys := make([]string, 0, len(keys)) + doneNames := make([]string, 0, len(keys)) + + for _, key := range keys { + o, ok := root[key] + if !ok { + return fmt.Errorf("key %q not found in spec", key) } - pkgName := strings.ReplaceAll(key, "_", "") o.isRoot = true - o.init("UserConfig") + o.init(key + "_user_config") + if o.Description == "" { + o.Description = toUpperFirst(o.camelName) + " user configurable settings" + } + + doneKeys = append(doneKeys, key) + doneNames = append(doneNames, o.camelName) - // Generates file - f := jen.NewFile(pkgName) + f := jen.NewFile(kind) f.HeaderComment(codeGenerated) - f.ImportAlias(importResourceSchema, "resource") - f.ImportAlias(importDatasourceSchema, "datasource") - genAllForObject(f, o) + f.ImportName(importSchema, "schema") + f.ImportName(importDiff, "diff") + f.ImportName(importValidation, "validation") + genSchema(f, o) // Sorts imports - imports.LocalPrefix = localPrefix b, err := imports.Process("", []byte(f.GoString()), nil) if err != nil { return err } // Saves file - dirPath := filepath.Join(destPath, kind, pkgName) - err = os.MkdirAll(dirPath, os.ModePerm) - if err != nil { - return err - } - err = os.WriteFile(filepath.Join(dirPath, key+".go"), b, 0644) if err != nil { return err } - - testFile, err := genTestFile(pkgName, o) - if err != nil { - return err - } - - testFileByte, err := 
format.Source([]byte(testFile)) - if err != nil { - return err - } - - err = os.WriteFile(filepath.Join(dirPath, key+"_test.go"), testFileByte, 0644) - if err != nil { - return err - } - } - return nil -} - -func genAllForObject(f *jen.File, o *object) { - genSchema(f, o, "Resource", importResourceSchema) - genSchema(f, o, "DataSource", importDatasourceSchema) - genTFObject(f, o) - genDTOObject(f, o) - genExpander(f, o) - genFlattener(f, o) - genAttrsMap(f, o) - - for _, p := range o.properties { - if p.isNestedBlock() { - if p.Type == objectTypeArray { - genAllForObject(f, p.ArrayItems) - } else { - genAllForObject(f, p) - } - } } - if !o.isRoot { - return + cases := make([]jen.Code, 0, len(keys)+1) + for i, k := range doneKeys { + cases = append(cases, jen.Case(jen.Lit(k)).Block( + jen.Return(jen.Id(doneNames[i]).Call()), + )) } - // Exports handy public functions for root object only - f.Op(` -// Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { - return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) -} + // Panics if unknown kind requested + cases = append(cases, jen.Default().Block(jen.Panic(jen.Lit("unknown user config type: ").Op("+").Id("kind")))) -// Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { - o := new(dtoUserConfig) - err := schemautil.MapToDTO(m, o) - if err != nil { - diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) - } - return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) -} -`) + f := jen.NewFile(kind) + f.HeaderComment(codeGenerated) + f.ImportName(importSchema, "schema") + f.Func().Id("GetUserConfig").Params(jen.Id("kind").String()).Op("*").Qual(importSchema, "Schema").Block( + jen.Switch(jen.Id("kind")).Block(cases...), + ) + return f.Save(filepath.Join(dirPath, kind+".go")) } -// genExpander creates function that unwraps TF object into json -func genExpander(f *jen.File, o *object) { - body := make([]jen.Code, 0) - props := jen.Dict{} - for _, p := range o.properties { - var value *jen.Statement - switch p.Type { - case objectTypeObject: - value = jen.Op(p.varName) - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSetBlockNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( - jen.Id("ctx"), - jen.Id("diags"), - jen.Id("expand"+p.camelName), - jen.Id("o").Dot(p.camelName), - ) - body = append(body, v, ifErr()) - case objectTypeArray: - value = jen.Op(p.varName) - if p.ArrayItems.Type == objectTypeObject { - // It is a list of objects - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSetNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( - jen.Id("ctx"), - jen.Id("diags"), - jen.Id("expand"+p.camelName), - jen.Id("o").Dot(p.camelName), - ) - body = append(body, v, ifErr()) - } else { - // It is a list of scalars - // We don't want pointer scalars here - t := strings.ReplaceAll(getDTOType(p.ArrayItems), "*", "") - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSet").Types(jen.Id(t)).Call( - jen.Id("ctx"), - jen.Id("diags"), - jen.Id("o").Dot(p.camelName), - ) - body = append(body, v, ifErr()) - } - default: - if p.Required { - value = 
jen.Id("o").Dot(p.camelName).Dot(getTFTypeToValue(p)).Call() - } else { - // Own functions for casting values - value = jen.Qual(importSchemautil, getTFTypeToValue(p)).Call(jen.Id("o").Dot(p.camelName)) - } - } - - props[jen.Id(p.camelName)] = value - } - - // Function body + return statement - body = append( - body, - jen.Return(jen.Id("&"+o.dtoStructName).Values(props)), +func genSchema(f *jen.File, o *object) { + f.Func().Id(o.camelName).Params().Op("*").Qual(importSchema, "Schema").Block( + jen.Return(jen.Op("&").Qual(importSchema, "Schema").Values(getSchemaValues(o))), ) - - funcName := "expand" + o.camelName - f.Comment(funcName + " expands tf object into dto object") - f.Func().Id(funcName).Params( - jen.Id("ctx").Qual("context", "Context"), - jen.Id("diags").Op("*").Qual(importDiag, "Diagnostics"), - jen.Id("o").Op("*"+o.tfoStructName), - ).Id("*" + o.dtoStructName).Block(body...) } -// genFlattener creates function that unwraps json into TF object -func genFlattener(f *jen.File, o *object) { - body := make([]jen.Code, 0) - props := jen.Dict{} - for _, p := range o.properties { - var value *jen.Statement - switch p.Type { - case objectTypeObject: - value = jen.Op(p.varName) - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenSetBlockNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( - jen.Id("ctx"), - jen.Id("diags"), - jen.Id("flatten"+p.camelName), - jen.Id(p.attrsName), - jen.Id("o").Dot(p.camelName), - ) - body = append(body, v, ifErr()) - case objectTypeArray: - value = jen.Op(p.varName) - if p.ArrayItems.Type == objectTypeObject { - // It is a list of objects - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenSetNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( - jen.Id("ctx"), - jen.Id("diags"), - jen.Id("flatten"+p.camelName), - jen.Id(p.attrsName), - jen.Id("o").Dot(p.camelName), - ) - body = append(body, v, ifErr()) - } else { - //It is a list of scalars - v := jen.List(jen.Id(p.varName), jen.Id("d")).Op(":=").Qual(importTypes, "SetValueFrom").Call( - jen.Id("ctx"), - jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type"), - jen.Id("o").Dot(p.camelName), - ) - body = append( - body, - v, - jen.Id("diags").Dot("Append").Call(jen.Id("d").Op("...")), - ifErr(), - ) - } - default: - value = jen.Qual(importTypes, getTFTypeFromValue(p)).Call(jen.Id("o").Dot(p.camelName)) - } +func getSchemaValues(o *object) jen.Dict { + values := make(jen.Dict) - if value == nil { - continue + if d := getDescription(o); d != "" { + for old, n := range replaceDescriptionSubStrings { + d = strings.ReplaceAll(d, old, n) } - - props[jen.Id(p.camelName)] = value + values[jen.Id("Description")] = jen.Lit(d) } - // Function body + return statement - body = append( - body, - jen.Return(jen.Id("&"+o.tfoStructName).Values(props)), - ) - - funcName := "flatten" + o.camelName - f.Comment(funcName + " flattens dto object into tf object") - f.Func().Id(funcName).Params( - jen.Id("ctx").Qual("context", "Context"), - jen.Id("diags").Op("*").Qual(importDiag, "Diagnostics"), - jen.Id("o").Op("*"+o.dtoStructName), - ).Id("*" + o.tfoStructName).Block(body...) 
-} - -// genAttrsMap creates attributes map for Flatten functions to "unwrap" response json into TF object -func genAttrsMap(f *jen.File, o *object) { - values := jen.Dict{} - for _, p := range o.properties { - key := jen.Lit(p.tfName) - switch p.Type { - case objectTypeArray, objectTypeObject: - var v jen.Code - if p.isNestedBlock() { - v = jen.Qual(importTypes, "ObjectType").Values(jen.Dict{ - jen.Id("AttrTypes"): jen.Id(p.attrsName), - }) - } else { - v = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type") - } - values[key] = jen.Qual(importTypes, "SetType").Values(jen.Dict{jen.Id("ElemType"): v}) - default: - values[key] = jen.Qual(importTypes, getTFType(p)+"Type") + var t string + switch o.Type { + case objectTypeObject, objectTypeArray: + if o.isSchemaless() { + // todo: handle schemaless if this happens + log.Fatalf("schemaless is not implemented: %q", o.jsonName) } - } - f.Var().Id(o.attrsName).Op("=").Map(jen.String()).Qual(importAttr, "Type").Values(values) -} -// genTFObject creates TF object (for plan) -func genTFObject(f *jen.File, o *object) { - fields := make([]jen.Code, 0) - for _, p := range o.properties { - fields = append(fields, jen.Id(p.camelName).Qual(importTypes, getTFType(p)).Tag(map[string]string{"tfsdk": p.tfName})) - } - f.Comment(fmt.Sprintf("%s %s", o.tfoStructName, getDescription(o))) - f.Type().Id(o.tfoStructName).Struct(fields...) -} + t = "List" + if o.isArray() && o.ArrayItems.isScalar() { + // Stores scalars in the set type. + // Nested sets of objects do not work well in Terraform: + // - Changing a field shows diff for the whole object, + // because hash is calculated for the object, not per field. + // So no per-field updates, whole object replacement only. + // https://discuss.hashicorp.com/t/provider-schema-typeset-detect-changes/32546 + // - There is a bug that doesn't let you put a set deep inside ResourceData + // https://github.com/hashicorp/terraform-plugin-sdk/issues/459 + // - The diff itself is invalid for nested sets (not on the root level). + // It just doesn't work as expected in all cases. + t = "Set" + } -// genDTOObject creates DTO object to send over HTTP -func genDTOObject(f *jen.File, o *object) { - fields := make([]jen.Code, 0) - for _, p := range o.properties { - tags := map[string]string{"json": p.jsonName, "groups": "create"} - if !p.Required { - tags["json"] += ",omitempty" + if o.MinItems != nil { + values[jen.Id("MinItems")] = jen.Lit(*o.MinItems) } - if !p.CreateOnly { - tags["groups"] += ",update" + if o.MaxItems != nil { + values[jen.Id("MaxItems")] = jen.Lit(*o.MaxItems) } - fields = append(fields, jen.Id(p.camelName).Id(getDTOType(p)).Tag(tags)) + case objectTypeBoolean: + t = "Bool" + case objectTypeString: + t = "String" + case objectTypeInteger: + t = "Int" + case objectTypeNumber: + t = "Float" + default: + log.Fatalf("unknown type %q for %q", o.Type, o.jsonName) } - f.Comment(o.dtoStructName + " request/response object") - f.Type().Id(o.dtoStructName).Struct(fields...) -} -// genSchema generates TF schema. For root object only, i.e. 
RedisUserConfig -func genSchema(f *jen.File, o *object, name, pkg string) { - if !o.isRoot { - return + values[jen.Id("Type")] = jen.Qual(importSchema, "Type"+t) + if o.IsDeprecated { + if o.DeprecationNotice == "" { + log.Fatalf("missing deprecation notice for %q", o.jsonName) + } + values[jen.Id("Deprecated")] = jen.Lit(o.DeprecationNotice) } - funcName := fmt.Sprintf("New%sSchema", name) - f.Comment(fmt.Sprintf("%s returns %s schema", funcName, strings.ToLower(name))) - f.Func().Id(funcName).Params().Qual(pkg, "SetNestedBlock").Block( - jen.Return(getSchemaAttributes(o, pkg)), - ) -} - -func getSchemaAttributes(o *object, pkg string) jen.Code { - isResource := pkg == importResourceSchema - blocks := jen.Dict{} - attribs := jen.Dict{} - - // Array properties are its item properties - properties := o.properties - if o.Type == objectTypeArray { - properties = o.ArrayItems.properties + if o.CreateOnly { + values[jen.Id("ForceNew")] = jen.True() } - for _, p := range properties { - key := jen.Lit(p.tfName) - if p.isNestedBlock() { - blocks[key] = getSchemaAttributes(p, pkg) + // Scalar elements of arrays are not marked Required or Optional + if !(o.isScalar() && o.parent.isArray()) { + if o.Required { + values[jen.Id("Required")] = jen.True() } else { - // For scalars - var value *jen.Statement - switch p.Type { - case objectTypeObject: - // Schemaless map - panic("schemaless objects are not supported") - case objectTypeArray: - value = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type") - } - - values := getSchemaAttributeValues(p, isResource) - values[jen.Id("ElementType")] = value - attribs[jen.Lit(p.tfName)] = jen.Qual(pkg, getTFType(p)+"Attribute").Values(values) + values[jen.Id("Optional")] = jen.True() } } - nested := jen.Dict{} - if len(blocks) > 0 { - nested[jen.Id("Blocks")] = jen.Map(jen.String()).Qual(pkg, "Block").Values(blocks) - } - - if len(attribs) > 0 { - nested[jen.Id("Attributes")] = jen.Map(jen.String()).Qual(pkg, "Attribute").Values(attribs) - } - - values := getSchemaAttributeValues(o, isResource) - values[jen.Id("NestedObject")] = jen.Qual(pkg, "NestedBlockObject").Values(nested) - return jen.Qual(pkg, "SetNestedBlock").Values(values) -} - -func getSchemaAttributeValues(o *object, isResource bool) jen.Dict { - a := jen.Dict{} - - if d := getDescription(o); d != "" { - a[jen.Id("Description")] = jen.Lit(d) - } - - if o.IsDeprecated { - a[jen.Id("DeprecationMessage")] = jen.Lit(fmt.Sprintf("%q is deprecated", o.tfName)) - } + if o.isScalar() { + // Default is for scalars only + // https://github.com/hashicorp/terraform-plugin-sdk/issues/142 + if o.Default != nil && !o.Required { + values[jen.Id("Default")] = scalarLit(o, o.Default) + } - validators := make([]jen.Code, 0) - if o.MinItems != nil { - validators = append(validators, valSizeAtLeast(*o.MinItems)) - } + if strings.Contains(o.jsonName, "api_key") || strings.Contains(o.jsonName, "password") { + values[jen.Id("Sensitive")] = jen.True() + } - if o.MaxItems != nil { - validators = append(validators, valSizeAtMost(*o.MaxItems)) - } + if o.Enum != nil { + args := make([]jen.Code, 0) + for _, v := range o.Enum { + args = append(args, scalarLit(o, v.Value)) + } - if !o.isNestedBlock() { - if !isResource { - a[jen.Id("Computed")] = jen.True() - } else { - if o.Required { - a[jen.Id("Required")] = jen.True() - } else { - a[jen.Id("Computed")] = jen.True() - a[jen.Id("Optional")] = jen.True() - - if o.Default != nil { - a[jen.Id("Default")] = getStaticDefault(o) - } + // There are no such validation functions for other types. 
+ // Bool and number won't compile + switch o.Type { + case objectTypeString: + values[jen.Id("ValidateFunc")] = jen.Qual(importValidation, "StringInSlice").Call(scalarArrayLit(o, args), jen.False()) + case objectTypeInteger: + values[jen.Id("ValidateFunc")] = jen.Qual(importValidation, "IntInSlice").Call(scalarArrayLit(o, args)) } } - } - if len(validators) > 0 { - a[jen.Id("Validators")] = valValidatorSet(validators...) + return values } - return a -} - -// getTFType matches generator types into plugin types -func getTFType(o *object) string { - switch o.Type { - case objectTypeObject: - if o.isNestedBlock() { - return "Set" + if o.isArray() { + if o.ArrayItems.isScalar() { + fields := getSchemaValues(o.ArrayItems) + values[jen.Id("Elem")] = jen.Op("&").Qual(importSchema, "Schema").Values(fields) + return values } - return "Map" - case objectTypeArray: - return "Set" - case objectTypeString: - return "String" - case objectTypeBoolean: - return "Bool" - case objectTypeInteger: - return "Int64" - case objectTypeNumber: - return "Float64" - } - panic(fmt.Sprintf("Unknown type for %q", o.jsonName)) -} -func getTFTypeToValue(o *object) string { - v := getTFType(o) - if !o.Required { - return fmt.Sprintf("Value%sPointer", v) + // Renders the array as an object + o = o.ArrayItems } - return "Value" + v -} -func getTFTypeFromValue(o *object) string { - v := getTFType(o) - if !o.Required { - return v + "PointerValue" + if o.isRoot { + values[jen.Id("DiffSuppressFunc")] = jen.Qual(importDiff, "SuppressUnchanged") } - return v + "Value" -} -func getDTOType(o *object) string { - optional := "*" - if o.Required { - optional = "" + fields := make(jen.Dict) + for _, p := range o.properties { + fields[jen.Lit(p.tfName)] = jen.Values(getSchemaValues(p)) } - switch o.Type { - case objectTypeObject: - return "*" + o.dtoStructName - case objectTypeArray: - t := "[]" + getDTOType(o.ArrayItems) - if o.ArrayItems.Type == objectTypeObject { - return t - } - // We don't want pointer scalars in slice - return strings.ReplaceAll(t, "*", "") - case objectTypeString: - return optional + "string" - case objectTypeBoolean: - return optional + "bool" - case objectTypeInteger: - return optional + "int64" - case objectTypeNumber: - return optional + "float64" - } - panic(fmt.Sprintf("Unknown type for %q", o.jsonName)) -} + values[jen.Id("Elem")] = jen.Op("&").Qual(importSchema, "Resource").Values(jen.Dict{ + jen.Id("Schema"): jen.Map(jen.String()).Op("*").Qual(importSchema, "Schema").Values(fields), + }) -// getStaticDefault returns "default" value for given field -func getStaticDefault(o *object) *jen.Statement { - var v *jen.Statement - switch o.Type { - case objectTypeString: - v = jen.Lit(o.Default.(string)) - case objectTypeInteger: - d, err := strconv.Atoi(o.Default.(string)) - if err != nil { - return nil - } - v = jen.Lit(d) - case objectTypeNumber: - v = jen.Lit(o.Default.(float64)) - case objectTypeBoolean: - v = jen.Lit(o.Default.(bool)) - default: - return nil - } - d := getTFType(o) - i := fmt.Sprintf("%s/%sdefault", importResourceSchema, strings.ToLower(d)) - return jen.Qual(i, "Static"+d).Call(v) + return values } func getDescription(o *object) string { @@ -544,11 +267,13 @@ func getDescription(o *object) string { d = o.Title } + // Comes from the schema, quite confusing + d = strings.TrimSuffix(d, "The default value is `map[]`.") if d != "" { desc = append(desc, addDot(d)) } - if o.Default != nil && o.Type != objectTypeArray { + if o.isScalar() && o.Default != nil { desc = append(desc, fmt.Sprintf("The 
default value is `%v`.", o.Default)) } @@ -572,26 +297,36 @@ func addDot(s string) string { return s } -func getValidator(name string, v any) *jen.Statement { - return jen.Qual(importSetValidator, name).Call(jen.Lit(v)) +var replaceDescriptionSubStrings = map[string]string{ + "UserConfig": "", + "DEPRECATED: ": "", + "This setting is deprecated. ": "", + "[seconds]": "(seconds)", } -func valSizeAtLeast(n int) *jen.Statement { - return getValidator("SizeAtLeast", n) -} - -func valSizeAtMost(n int) *jen.Statement { - return getValidator("SizeAtMost", n) -} - -func valValidatorSet(c ...jen.Code) *jen.Statement { - return jen.Index().Qual(importValidator, "Set").Values(c...) -} - -func ifErr() *jen.Statement { - return jen.If(jen.Id("diags").Dot("HasError").Call()).Block(jen.Return(jen.Nil())) +func scalarLit(o *object, value any) *jen.Statement { + switch o.Type { + case objectTypeString: + return jen.Lit(value.(string)) + case objectTypeBoolean: + return jen.Lit(value.(bool)) + case objectTypeInteger: + n, _ := strconv.Atoi(value.(string)) + return jen.Lit(n) + case objectTypeNumber: + return jen.Lit(value.(float64)) + } + log.Fatalf("unknown scalar %v", o) + return nil } -func toPtr[T any](v T) *T { - return &v +func scalarArrayLit(o *object, args []jen.Code) *jen.Statement { + switch o.Type { + case objectTypeString: + return jen.Index().String().Values(args...) + case objectTypeInteger: + return jen.Index().Int().Values(args...) + } + log.Fatalf("unexpected element type of array for default value: %q", o.Type) + return nil } diff --git a/ucgenerator/models.go b/ucgenerator/models.go index 2b6942269..a5e27c303 100644 --- a/ucgenerator/models.go +++ b/ucgenerator/models.go @@ -1,9 +1,12 @@ package main import ( + "encoding/json" + "fmt" + "log" "strings" - "github.com/stoewer/go-strcase" + "github.com/iancoleman/strcase" "golang.org/x/exp/slices" ) @@ -33,9 +36,10 @@ type object struct { Type objectType `yaml:"-"` Required bool `yaml:"-"` - IsDeprecated bool `yaml:"is_deprecated"` - Default any `yaml:"default"` - Enum []*struct { + IsDeprecated bool `yaml:"is_deprecated"` + DeprecationNotice string `yaml:"deprecation_notice"` + Default any `yaml:"default"` + Enum []*struct { Value string `yaml:"value"` IsDeprecated bool `yaml:"is_deprecated"` } `yaml:"enum"` @@ -52,40 +56,29 @@ type object struct { Description string `yaml:"description"` Properties map[string]*object `yaml:"properties"` ArrayItems *object `yaml:"items"` + OneOf []*object `yaml:"one_of"` RequiredFields []string `yaml:"required"` CreateOnly bool `yaml:"create_only"` Nullable bool `yaml:"-"` } -func (o *object) isNestedBlock() bool { - switch o.Type { - case objectTypeObject: - return len(o.Properties) > 0 - case objectTypeArray: - switch o.ArrayItems.Type { - case objectTypeObject, objectTypeArray: - return true - } - } - return false -} - func (o *object) init(name string) { - o.jsonName = name - o.tfName = strings.ReplaceAll(name, ".", "__") o.camelName = toCamelCase(name) - - low := toLowerFirst(o.camelName) - o.varName = low + "Var" - o.attrsName = low + "Attrs" + o.varName = o.camelName + "Var" + o.attrsName = o.camelName + "Attrs" o.tfoStructName = "tfo" + o.camelName o.dtoStructName = "dto" + o.camelName + o.jsonName = name + o.tfName = strings.ReplaceAll(name, ".", "__dot__") + + unwrapArrayMultipleTypes(o) // Sorts properties, so they keep order on each generation keys := make([]string, 0, len(o.Properties)) for k := range o.Properties { keys = append(keys, k) } + slices.Sort(keys) for _, k := range keys { 
o.properties = append(o.properties, o.Properties[k]) @@ -107,36 +100,166 @@ if v, ok := o.OrigType.(string); ok { o.Type = objectType(v) } else if v, ok := o.OrigType.([]interface{}); ok { - o.Type = objectType(v[0].(string)) + types := 0 for _, t := range v { switch s := t.(string); s { case "null": o.Nullable = true default: o.Type = objectType(s) + types++ + if types > 1 { + log.Fatalf("%q has multiple types", name) + } } } } - if o.Type == objectTypeArray { + if o.isArray() { o.ArrayItems.parent = o o.ArrayItems.init(name) } - // In terraform objects are lists of one item - // Root item and properties should have max constraint - if o.Type == objectTypeObject { - if o.isRoot || o.parent != nil && o.parent.Type == objectTypeObject { - o.MaxItems = toPtr(1) + // In terraform, objects are lists of one item. + // So we need to add a constraint + if o.isObject() { + one := 1 + o.MaxItems = &one + o.Default = nil + } + + if o.isArray() && o.ArrayItems.isObject() { + // In terraform, an object is rendered as a list with a single object in it. + // So a real list holding one object looks exactly the same. + // We must be able to tell them apart to convert values for the API. + if o.MaxItems != nil && *o.MaxItems == 1 { + // If this ever happens, the fix is to set MaxItems to nil + log.Fatalf("%q array with object element and MaxItems==1", name) + } } + + // A fix that removes empty string default value + if o.Type == objectTypeString && o.Default != nil && o.Default.(string) == "" { + o.Default = nil + } +} + +func (o *object) isNestedBlock() bool { + switch o.Type { + case objectTypeObject: + return len(o.Properties) > 0 + case objectTypeArray: + return o.ArrayItems.isObject() || o.ArrayItems.isArray() + } + return false +} + +func (o *object) isObject() bool { + return o.Type == objectTypeObject +} + +func (o *object) isSchemaless() bool { + return o.isObject() && len(o.Properties) == 0 +} + +func (o *object) isArray() bool { + return o.Type == objectTypeArray +} + +func (o *object) isScalar() bool { + return !(o.isObject() || o.isArray()) +} + +func (o *object) ListProperties() []*object { + if o.isArray() { + return o.ArrayItems.properties + } + return o.properties } -// toCamelCase some fields has dots within, makes cleaner camelCase +// toCamelCase converts a name to camelCase; dots in field names are replaced first to keep the result clean func toCamelCase(s string) string { - return strcase.UpperCamelCase(strings.ReplaceAll(s, ".", "_")) + return strcase.ToLowerCamel(strings.ReplaceAll(s, ".", "_")) +} + +func toUpperFirst(s string) string { + return strings.ToUpper(s[0:1]) + s[1:] } -func toLowerFirst(s string) string { - return strings.ToLower(s[0:1]) + s[1:] +func deepcopy(o *object) *object { + clone := new(object) + b, _ := json.Marshal(o) + _ = json.Unmarshal(b, clone) + return clone +} + +const deprecationNotice = "Deprecated. Use `%s` instead." + +// unwrapArrayMultipleTypes automatically unwraps multiple types ("type" as list or oneOf) for arrays. +// A "foo" field with types "string" and "object" is unwrapped into three fields: foo, foo_string, foo_object. +// The first seen type becomes the default one and is marked as deprecated. +// That mirrors how multi-typed fields evolve: +// first they have one type, then a new one is added; we split them into separate fields in terraform +// and deprecate the original field. 
+func unwrapArrayMultipleTypes(o *object) { + for key, p := range o.Properties { + // So far, only array types are unwrapped + if p.ArrayItems == nil { + continue + } + + prefix := key + "_" + fields := make(map[string]*object) + + // Unwraps multiple _type names_, e.g. [string, object] + types, ok := p.ArrayItems.OrigType.([]interface{}) + if ok { + strTypes := make([]string, 0) + for _, t := range types { + if s := t.(string); s != "null" { + strTypes = append(strTypes, s) + } + } + + if len(strTypes) == 1 { + continue + } + + // Multiple types. + // This ArrayItems object is composite: + // it has properties for the object type, and MaxLength for the string type. + // So the object is cloned per type, with the type set explicitly. + for _, s := range strTypes { + clone := deepcopy(p) + clone.ArrayItems.OrigType = s + fields[prefix+s] = clone + } + + p.IsDeprecated = true + p.DeprecationNotice = fmt.Sprintf(deprecationNotice, prefix+strTypes[0]) + p.ArrayItems.OrigType = strTypes[0] + fields[key] = p + + } else if len(p.ArrayItems.OneOf) != 0 { + // Unwraps multiple _type objects_, e.g. [{type:string}, {type: object}] + for i := range p.ArrayItems.OneOf { + t := p.ArrayItems.OneOf[i] + clone := deepcopy(p) + clone.ArrayItems = t + clone.Description = fmt.Sprintf("%s %s", addDot(p.Description), t.Description) + fields[prefix+t.OrigType.(string)] = clone + } + + // The first seen type takes priority and replaces the original object + priorityType := prefix + p.ArrayItems.OneOf[0].OrigType.(string) + orig := deepcopy(fields[priorityType]) + orig.DeprecationNotice = fmt.Sprintf(deprecationNotice, priorityType) + orig.IsDeprecated = true + fields[key] = orig + } + + for k, c := range fields { + o.Properties[k] = c + } + } }
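---

A minimal standalone sketch (illustrative only, not generator code) of the unwrapping rule implemented by unwrapArrayMultipleTypes above. The field name and item types are chosen to mirror the ip_filter/ip_filter_string/ip_filter_object trio this patch generates:

package main

import "fmt"

// unwrapNames mimics the naming rule of unwrapArrayMultipleTypes:
// a field whose array items allow several types is split into one
// suffixed field per type, and the original key keeps the first
// seen type as a deprecated alias.
func unwrapNames(field string, itemTypes []string) map[string]string {
	out := make(map[string]string)
	for _, t := range itemTypes {
		out[field+"_"+t] = t
	}
	out[field] = itemTypes[0] // deprecated, points users to field_<type>
	return out
}

func main() {
	fmt.Println(unwrapNames("ip_filter", []string{"string", "object"}))
	// Output: map[ip_filter:string ip_filter_object:object ip_filter_string:string]
}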
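And a sketch of how the generated dispatch could be consumed. The resource map below is hypothetical wiring, and since the package is internal, the snippet compiles only inside this provider module:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service"
)

func main() {
	// Hypothetical wiring: each service resource embeds its generated
	// user config block under a "<kind>_user_config" key.
	res := map[string]*schema.Schema{
		"pg_user_config": service.GetUserConfig("pg"),
	}

	// User config blocks are single-item lists, as generated above.
	fmt.Println(res["pg_user_config"].Type, res["pg_user_config"].MaxItems) // TypeList 1
}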